VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 60879

Last change on this file since 60879 was 60874, checked in by vboxsync, 9 years ago

IOMRC.cpp,++: Use IEM for IN and OUT too, cleaning out unnecessary code.

1/* $Id: IEMAll.cpp 60874 2016-05-07 17:55:21Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that cause too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much and
37 * leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85
86/*********************************************************************************************************************************
87* Header Files *
88*********************************************************************************************************************************/
89#define LOG_GROUP LOG_GROUP_IEM
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/pdm.h>
93#include <VBox/vmm/pgm.h>
94#include <internal/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/tm.h>
99#include <VBox/vmm/dbgf.h>
100#include <VBox/vmm/dbgftrace.h>
101#ifdef VBOX_WITH_RAW_MODE_NOT_R0
102# include <VBox/vmm/patm.h>
103# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
104# include <VBox/vmm/csam.h>
105# endif
106#endif
107#include "IEMInternal.h"
108#ifdef IEM_VERIFICATION_MODE_FULL
109# include <VBox/vmm/rem.h>
110# include <VBox/vmm/mm.h>
111#endif
112#include <VBox/vmm/vm.h>
113#include <VBox/log.h>
114#include <VBox/err.h>
115#include <VBox/param.h>
116#include <VBox/dis.h>
117#include <VBox/disopcode.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123
124/*********************************************************************************************************************************
125* Structures and Typedefs *
126*********************************************************************************************************************************/
127/** @typedef PFNIEMOP
128 * Pointer to an opcode decoder function.
129 */
130
131/** @def FNIEMOP_DEF
132 * Define an opcode decoder function.
133 *
134 * We're using macros for this so that adding and removing parameters as well as
135 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL.
136 *
137 * @param a_Name The function name.
138 */
139
140
141#if defined(__GNUC__) && defined(RT_ARCH_X86)
142typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
143# define FNIEMOP_DEF(a_Name) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
145# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
147# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
148 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
149
150#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
151typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
152# define FNIEMOP_DEF(a_Name) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
156# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
157 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
158
159#elif defined(__GNUC__)
160typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#else
169typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
170# define FNIEMOP_DEF(a_Name) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
174# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
175 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
176
177#endif
178
179
180/**
181 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
182 */
183typedef union IEMSELDESC
184{
185 /** The legacy view. */
186 X86DESC Legacy;
187 /** The long mode view. */
188 X86DESC64 Long;
189} IEMSELDESC;
190/** Pointer to a selector descriptor table entry. */
191typedef IEMSELDESC *PIEMSELDESC;
192
193
194/*********************************************************************************************************************************
195* Defined Constants And Macros *
196*********************************************************************************************************************************/
197/** Temporary hack to disable the double execution. Will be removed in favor
198 * of a dedicated execution mode in EM. */
199//#define IEM_VERIFICATION_MODE_NO_REM
200
201/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
202 * due to GCC lacking knowledge about the value range of a switch. */
203#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
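/** Illustrative sketch of the intended use (the switch variable and the
 * cbValue assignments are made up for the example):
 * @code
 *  switch (pIemCpu->enmEffOpSize)
 *  {
 *      case IEMMODE_16BIT: cbValue = 2; break;
 *      case IEMMODE_32BIT: cbValue = 4; break;
 *      case IEMMODE_64BIT: cbValue = 8; break;
 *      IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *  }
 * @endcode
 */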
204
205/**
206 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
207 * occasion.
208 */
209#ifdef LOG_ENABLED
210# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
211 do { \
212 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
213 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
214 } while (0)
215#else
216# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
217 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
218#endif
219
220/**
221 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
222 * occasion using the supplied logger statement.
223 *
224 * @param a_LoggerArgs What to log on failure.
225 */
226#ifdef LOG_ENABLED
227# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
228 do { \
229 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
230 /*LogFunc(a_LoggerArgs);*/ \
231 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
232 } while (0)
233#else
234# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
235 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
236#endif
237
238/**
239 * Call an opcode decoder function.
240 *
241 * We're using macros for this so that adding and removing parameters can be
242 * done as we please. See FNIEMOP_DEF.
243 */
244#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
245
246/**
247 * Call a common opcode decoder function taking one extra argument.
248 *
249 * We're using macros for this so that adding and removing parameters can be
250 * done as we please. See FNIEMOP_DEF_1.
251 */
252#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
253
254/**
255 * Call a common opcode decoder function taking two extra arguments.
256 *
257 * We're using macros for this so that adding and removing parameters can be
258 * done as we please. See FNIEMOP_DEF_2.
259 */
260#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
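/** Illustrative sketch of how FNIEMOP_DEF and FNIEMOP_CALL fit together.
 * The handler name iemOp_ExampleNop is invented for the example; the
 * dispatch pattern uses g_apfnOneByteMap (declared below) and the
 * IEM_OPCODE_GET_NEXT_U8 macro (defined further down in this file):
 * @code
 *  FNIEMOP_DEF(iemOp_ExampleNop)
 *  {
 *      // ... decode operands, emit the operation ...
 *      return VINF_SUCCESS;
 *  }
 *
 *  // Fetch an opcode byte and dispatch it through the one-byte map:
 *  uint8_t b;
 *  IEM_OPCODE_GET_NEXT_U8(&b);
 *  return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 * @endcode
 */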
261
262/**
263 * Check if we're currently executing in real or virtual 8086 mode.
264 *
265 * @returns @c true if it is, @c false if not.
266 * @param a_pIemCpu The IEM state of the current CPU.
267 */
268#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
269
270/**
271 * Check if we're currently executing in virtual 8086 mode.
272 *
273 * @returns @c true if it is, @c false if not.
274 * @param a_pIemCpu The IEM state of the current CPU.
275 */
276#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
277
278/**
279 * Check if we're currently executing in long mode.
280 *
281 * @returns @c true if it is, @c false if not.
282 * @param a_pIemCpu The IEM state of the current CPU.
283 */
284#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
285
286/**
287 * Check if we're currently executing in real mode.
288 *
289 * @returns @c true if it is, @c false if not.
290 * @param a_pIemCpu The IEM state of the current CPU.
291 */
292#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
293
294/**
295 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
296 * @returns PCCPUMFEATURES
297 * @param a_pIemCpu The IEM state of the current CPU.
298 */
299#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
300
301/**
302 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
303 * @returns PCCPUMFEATURES
304 * @param a_pIemCpu The IEM state of the current CPU.
305 */
306#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
307
308/**
309 * Evaluates to true if we're presenting an Intel CPU to the guest.
310 */
311#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
312
313/**
314 * Evaluates to true if we're presenting an AMD CPU to the guest.
315 */
316#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
317
318/**
319 * Check if the address is canonical.
320 */
321#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
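/** Illustrative sketch of how the mode predicates are typically used in the
 * instruction implementations (the surrounding checks and the GCPtrMem
 * variable are made up for the example):
 * @code
 *  if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))          // e.g. instruction only valid in protected mode
 *      return iemRaiseGeneralProtectionFault0(pIemCpu);
 *  if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
 *      && !IEM_IS_CANONICAL(GCPtrMem))            // non-canonical addresses \#GP in long mode
 *      return iemRaiseGeneralProtectionFault0(pIemCpu);
 * @endcode
 */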
322
323
324/*********************************************************************************************************************************
325* Global Variables *
326*********************************************************************************************************************************/
327extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
328
329
330/** Function table for the ADD instruction. */
331IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
332{
333 iemAImpl_add_u8, iemAImpl_add_u8_locked,
334 iemAImpl_add_u16, iemAImpl_add_u16_locked,
335 iemAImpl_add_u32, iemAImpl_add_u32_locked,
336 iemAImpl_add_u64, iemAImpl_add_u64_locked
337};
338
339/** Function table for the ADC instruction. */
340IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
341{
342 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
343 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
344 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
345 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
346};
347
348/** Function table for the SUB instruction. */
349IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
350{
351 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
352 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
353 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
354 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
355};
356
357/** Function table for the SBB instruction. */
358IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
359{
360 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
361 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
362 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
363 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
364};
365
366/** Function table for the OR instruction. */
367IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
368{
369 iemAImpl_or_u8, iemAImpl_or_u8_locked,
370 iemAImpl_or_u16, iemAImpl_or_u16_locked,
371 iemAImpl_or_u32, iemAImpl_or_u32_locked,
372 iemAImpl_or_u64, iemAImpl_or_u64_locked
373};
374
375/** Function table for the XOR instruction. */
376IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
377{
378 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
379 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
380 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
381 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
382};
383
384/** Function table for the AND instruction. */
385IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
386{
387 iemAImpl_and_u8, iemAImpl_and_u8_locked,
388 iemAImpl_and_u16, iemAImpl_and_u16_locked,
389 iemAImpl_and_u32, iemAImpl_and_u32_locked,
390 iemAImpl_and_u64, iemAImpl_and_u64_locked
391};
392
393/** Function table for the CMP instruction.
394 * @remarks Making operand order ASSUMPTIONS.
395 */
396IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
397{
398 iemAImpl_cmp_u8, NULL,
399 iemAImpl_cmp_u16, NULL,
400 iemAImpl_cmp_u32, NULL,
401 iemAImpl_cmp_u64, NULL
402};
403
404/** Function table for the TEST instruction.
405 * @remarks Making operand order ASSUMPTIONS.
406 */
407IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
408{
409 iemAImpl_test_u8, NULL,
410 iemAImpl_test_u16, NULL,
411 iemAImpl_test_u32, NULL,
412 iemAImpl_test_u64, NULL
413};
414
415/** Function table for the BT instruction. */
416IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
417{
418 NULL, NULL,
419 iemAImpl_bt_u16, NULL,
420 iemAImpl_bt_u32, NULL,
421 iemAImpl_bt_u64, NULL
422};
423
424/** Function table for the BTC instruction. */
425IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
426{
427 NULL, NULL,
428 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
429 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
430 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
431};
432
433/** Function table for the BTR instruction. */
434IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
435{
436 NULL, NULL,
437 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
438 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
439 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
440};
441
442/** Function table for the BTS instruction. */
443IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
444{
445 NULL, NULL,
446 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
447 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
448 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
449};
450
451/** Function table for the BSF instruction. */
452IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
453{
454 NULL, NULL,
455 iemAImpl_bsf_u16, NULL,
456 iemAImpl_bsf_u32, NULL,
457 iemAImpl_bsf_u64, NULL
458};
459
460/** Function table for the BSR instruction. */
461IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
462{
463 NULL, NULL,
464 iemAImpl_bsr_u16, NULL,
465 iemAImpl_bsr_u32, NULL,
466 iemAImpl_bsr_u64, NULL
467};
468
469/** Function table for the IMUL instruction. */
470IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
471{
472 NULL, NULL,
473 iemAImpl_imul_two_u16, NULL,
474 iemAImpl_imul_two_u32, NULL,
475 iemAImpl_imul_two_u64, NULL
476};
477
478/** Group 1 /r lookup table. */
479IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
480{
481 &g_iemAImpl_add,
482 &g_iemAImpl_or,
483 &g_iemAImpl_adc,
484 &g_iemAImpl_sbb,
485 &g_iemAImpl_and,
486 &g_iemAImpl_sub,
487 &g_iemAImpl_xor,
488 &g_iemAImpl_cmp
489};
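/** Illustrative sketch of how the group 1 table is indexed: the /r digit in
 * bits 3 thru 5 of the ModR/M byte selects the operation (the bRm variable
 * is made up for the example):
 * @code
 *  uint8_t bRm;
 *  IEM_OPCODE_GET_NEXT_U8(&bRm);                      // fetch the ModR/M byte
 *  PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 * @endcode
 */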
490
491/** Function table for the INC instruction. */
492IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
493{
494 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
495 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
496 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
497 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
498};
499
500/** Function table for the DEC instruction. */
501IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
502{
503 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
504 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
505 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
506 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
507};
508
509/** Function table for the NEG instruction. */
510IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
511{
512 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
513 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
514 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
515 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
516};
517
518/** Function table for the NOT instruction. */
519IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
520{
521 iemAImpl_not_u8, iemAImpl_not_u8_locked,
522 iemAImpl_not_u16, iemAImpl_not_u16_locked,
523 iemAImpl_not_u32, iemAImpl_not_u32_locked,
524 iemAImpl_not_u64, iemAImpl_not_u64_locked
525};
526
527
528/** Function table for the ROL instruction. */
529IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
530{
531 iemAImpl_rol_u8,
532 iemAImpl_rol_u16,
533 iemAImpl_rol_u32,
534 iemAImpl_rol_u64
535};
536
537/** Function table for the ROR instruction. */
538IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
539{
540 iemAImpl_ror_u8,
541 iemAImpl_ror_u16,
542 iemAImpl_ror_u32,
543 iemAImpl_ror_u64
544};
545
546/** Function table for the RCL instruction. */
547IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
548{
549 iemAImpl_rcl_u8,
550 iemAImpl_rcl_u16,
551 iemAImpl_rcl_u32,
552 iemAImpl_rcl_u64
553};
554
555/** Function table for the RCR instruction. */
556IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
557{
558 iemAImpl_rcr_u8,
559 iemAImpl_rcr_u16,
560 iemAImpl_rcr_u32,
561 iemAImpl_rcr_u64
562};
563
564/** Function table for the SHL instruction. */
565IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
566{
567 iemAImpl_shl_u8,
568 iemAImpl_shl_u16,
569 iemAImpl_shl_u32,
570 iemAImpl_shl_u64
571};
572
573/** Function table for the SHR instruction. */
574IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
575{
576 iemAImpl_shr_u8,
577 iemAImpl_shr_u16,
578 iemAImpl_shr_u32,
579 iemAImpl_shr_u64
580};
581
582/** Function table for the SAR instruction. */
583IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
584{
585 iemAImpl_sar_u8,
586 iemAImpl_sar_u16,
587 iemAImpl_sar_u32,
588 iemAImpl_sar_u64
589};
590
591
592/** Function table for the MUL instruction. */
593IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
594{
595 iemAImpl_mul_u8,
596 iemAImpl_mul_u16,
597 iemAImpl_mul_u32,
598 iemAImpl_mul_u64
599};
600
601/** Function table for the IMUL instruction working implicitly on rAX. */
602IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
603{
604 iemAImpl_imul_u8,
605 iemAImpl_imul_u16,
606 iemAImpl_imul_u32,
607 iemAImpl_imul_u64
608};
609
610/** Function table for the DIV instruction. */
611IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
612{
613 iemAImpl_div_u8,
614 iemAImpl_div_u16,
615 iemAImpl_div_u32,
616 iemAImpl_div_u64
617};
618
619/** Function table for the IDIV instruction. */
620IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
621{
622 iemAImpl_idiv_u8,
623 iemAImpl_idiv_u16,
624 iemAImpl_idiv_u32,
625 iemAImpl_idiv_u64
626};
627
628/** Function table for the SHLD instruction */
629IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
630{
631 iemAImpl_shld_u16,
632 iemAImpl_shld_u32,
633 iemAImpl_shld_u64,
634};
635
636/** Function table for the SHRD instruction */
637IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
638{
639 iemAImpl_shrd_u16,
640 iemAImpl_shrd_u32,
641 iemAImpl_shrd_u64,
642};
643
644
645/** Function table for the PUNPCKLBW instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
647/** Function table for the PUNPCKLWD instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
649/** Function table for the PUNPCKLDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
651/** Function table for the PUNPCKLQDQ instruction */
652IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
653
654/** Function table for the PUNPCKHBW instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
656/** Function table for the PUNPCKHWD instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
658/** Function table for the PUNPCKHDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
660/** Function table for the PUNPCKHQDQ instruction */
661IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
662
663/** Function table for the PXOR instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
665/** Function table for the PCMPEQB instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
667/** Function table for the PCMPEQW instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
669/** Function table for the PCMPEQD instruction */
670IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
671
672
673#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
674/** What IEM just wrote. */
675uint8_t g_abIemWrote[256];
676/** How much IEM just wrote. */
677size_t g_cbIemWrote;
678#endif
679
680
681/*********************************************************************************************************************************
682* Internal Functions *
683*********************************************************************************************************************************/
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
686IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
687IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
689IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
692IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
694IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
695IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
698IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
699IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
700IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
701IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
702IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
703IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
709IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
710IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
713IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
714IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
715IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
716IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
717
718#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
719IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
720#endif
721IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
722IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
723
724
725
726/**
727 * Sets the pass up status.
728 *
729 * @returns VINF_SUCCESS.
730 * @param pIemCpu The per CPU IEM state of the calling thread.
731 * @param rcPassUp The pass up status. Must be informational.
732 * VINF_SUCCESS is not allowed.
733 */
734IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
735{
736 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
737
738 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
739 if (rcOldPassUp == VINF_SUCCESS)
740 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
741 /* If both are EM scheduling codes, use EM priority rules. */
742 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
743 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
744 {
745 if (rcPassUp < rcOldPassUp)
746 {
747 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
748 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
749 }
750 else
751 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
752 }
753 /* Override EM scheduling with specific status code. */
754 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
755 {
756 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
757 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
758 }
759 /* Don't override specific status code, first come first served. */
760 else
761 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
762 return VINF_SUCCESS;
763}
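/** Typical caller pattern (see the opcode prefetch code further down): the
 * informational status is stashed for later and the caller carries on as if
 * the access had returned VINF_SUCCESS:
 * @code
 *  else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *      rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);  // returns VINF_SUCCESS
 * @endcode
 */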
764
765
766/**
767 * Calculates the CPU mode.
768 *
769 * This is mainly for updating IEMCPU::enmCpuMode.
770 *
771 * @returns CPU mode.
772 * @param pCtx The register context for the CPU.
773 */
774DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
775{
776 if (CPUMIsGuestIn64BitCodeEx(pCtx))
777 return IEMMODE_64BIT;
778 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
779 return IEMMODE_32BIT;
780 return IEMMODE_16BIT;
781}
782
783
784/**
785 * Initializes the execution state.
786 *
787 * @param pIemCpu The per CPU IEM state.
788 * @param fBypassHandlers Whether to bypass access handlers.
789 *
790 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
791 * side-effects in strict builds.
792 */
793DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
794{
795 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
796 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
797
798 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
799 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
800
801#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
808 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
809 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
810#endif
811
812#ifdef VBOX_WITH_RAW_MODE_NOT_R0
813 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
814#endif
815 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
816 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
817#ifdef VBOX_STRICT
818 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
819 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
820 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
821 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
822 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
823 pIemCpu->uRexReg = 127;
824 pIemCpu->uRexB = 127;
825 pIemCpu->uRexIndex = 127;
826 pIemCpu->iEffSeg = 127;
827 pIemCpu->offOpcode = 127;
828 pIemCpu->cbOpcode = 127;
829#endif
830
831 pIemCpu->cActiveMappings = 0;
832 pIemCpu->iNextMapping = 0;
833 pIemCpu->rcPassUp = VINF_SUCCESS;
834 pIemCpu->fBypassHandlers = fBypassHandlers;
835#ifdef VBOX_WITH_RAW_MODE_NOT_R0
836 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
837 && pCtx->cs.u64Base == 0
838 && pCtx->cs.u32Limit == UINT32_MAX
839 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
840 if (!pIemCpu->fInPatchCode)
841 CPUMRawLeave(pVCpu, VINF_SUCCESS);
842#endif
843
844#ifdef IEM_VERIFICATION_MODE_FULL
845 pIemCpu->fNoRemSavedByExec = pIemCpu->fNoRem;
846 pIemCpu->fNoRem = true;
847#endif
848}
849
850
851/**
852 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
853 *
854 * @param pIemCpu The per CPU IEM state.
855 */
856DECLINLINE(void) iemUninitExec(PIEMCPU pIemCpu)
857{
858 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
859#ifdef IEM_VERIFICATION_MODE_FULL
860 pIemCpu->fNoRem = pIemCpu->fNoRemSavedByExec;
861#endif
862#ifdef VBOX_STRICT
863 pIemCpu->cbOpcode = 0;
864#else
865 NOREF(pIemCpu);
866#endif
867}
868
869
870/**
871 * Initializes the decoder state.
872 *
873 * @param pIemCpu The per CPU IEM state.
874 * @param fBypassHandlers Whether to bypass access handlers.
875 */
876DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
877{
878 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
879 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
880
881 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
882 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
883
884#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
885 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
886 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
887 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
888 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
889 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
890 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
891 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
892 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
893#endif
894
895#ifdef VBOX_WITH_RAW_MODE_NOT_R0
896 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
897#endif
898 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
899#ifdef IEM_VERIFICATION_MODE_FULL
900 if (pIemCpu->uInjectCpl != UINT8_MAX)
901 pIemCpu->uCpl = pIemCpu->uInjectCpl;
902#endif
903 IEMMODE enmMode = iemCalcCpuMode(pCtx);
904 pIemCpu->enmCpuMode = enmMode;
905 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
906 pIemCpu->enmEffAddrMode = enmMode;
907 if (enmMode != IEMMODE_64BIT)
908 {
909 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
910 pIemCpu->enmEffOpSize = enmMode;
911 }
912 else
913 {
914 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
915 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
916 }
917 pIemCpu->fPrefixes = 0;
918 pIemCpu->uRexReg = 0;
919 pIemCpu->uRexB = 0;
920 pIemCpu->uRexIndex = 0;
921 pIemCpu->iEffSeg = X86_SREG_DS;
922 pIemCpu->offOpcode = 0;
923 pIemCpu->cbOpcode = 0;
924 pIemCpu->cActiveMappings = 0;
925 pIemCpu->iNextMapping = 0;
926 pIemCpu->rcPassUp = VINF_SUCCESS;
927 pIemCpu->fBypassHandlers = fBypassHandlers;
928#ifdef VBOX_WITH_RAW_MODE_NOT_R0
929 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
930 && pCtx->cs.u64Base == 0
931 && pCtx->cs.u32Limit == UINT32_MAX
932 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
933 if (!pIemCpu->fInPatchCode)
934 CPUMRawLeave(pVCpu, VINF_SUCCESS);
935#endif
936
937#ifdef DBGFTRACE_ENABLED
938 switch (enmMode)
939 {
940 case IEMMODE_64BIT:
941 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
942 break;
943 case IEMMODE_32BIT:
944 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
945 break;
946 case IEMMODE_16BIT:
947 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
948 break;
949 }
950#endif
951}
952
953
954/**
955 * Prefetches opcodes the first time execution is started.
956 *
957 * @returns Strict VBox status code.
958 * @param pIemCpu The IEM state.
959 * @param fBypassHandlers Whether to bypass access handlers.
960 */
961IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
962{
963#ifdef IEM_VERIFICATION_MODE_FULL
964 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
965#endif
966 iemInitDecoder(pIemCpu, fBypassHandlers);
967
968 /*
969 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
970 *
971 * First translate CS:rIP to a physical address.
972 */
973 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
974 uint32_t cbToTryRead;
975 RTGCPTR GCPtrPC;
976 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
977 {
978 cbToTryRead = PAGE_SIZE;
979 GCPtrPC = pCtx->rip;
980 if (!IEM_IS_CANONICAL(GCPtrPC))
981 return iemRaiseGeneralProtectionFault0(pIemCpu);
982 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
983 }
984 else
985 {
986 uint32_t GCPtrPC32 = pCtx->eip;
987 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
988 if (GCPtrPC32 > pCtx->cs.u32Limit)
989 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
990 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
991 if (!cbToTryRead) /* overflowed */
992 {
993 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
994 cbToTryRead = UINT32_MAX;
995 }
996 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
997 Assert(GCPtrPC <= UINT32_MAX);
998 }
999
1000#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1001 /* Allow interpretation of patch manager code blocks since they can for
1002 instance throw #PFs for perfectly good reasons. */
1003 if (pIemCpu->fInPatchCode)
1004 {
1005 size_t cbRead = 0;
1006 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
1007 AssertRCReturn(rc, rc);
1008 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1009 return VINF_SUCCESS;
1010 }
1011#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1012
1013 RTGCPHYS GCPhys;
1014 uint64_t fFlags;
1015 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
1016 if (RT_FAILURE(rc))
1017 {
1018 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1019 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1020 }
1021 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1022 {
1023 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1024 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1025 }
1026 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1027 {
1028 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1029 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1030 }
1031 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1032 /** @todo Check reserved bits and such stuff. PGM is better at doing
1033 * that, so do it when implementing the guest virtual address
1034 * TLB... */
1035
1036#ifdef IEM_VERIFICATION_MODE_FULL
1037 /*
1038 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1039 * instruction.
1040 */
1041 /** @todo optimize this differently by not using PGMPhysRead. */
1042 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1043 pIemCpu->GCPhysOpcodes = GCPhys;
1044 if ( offPrevOpcodes < cbOldOpcodes
1045 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1046 {
1047 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1048 Assert(cbNew <= RT_ELEMENTS(pIemCpu->abOpcode));
1049 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1050 pIemCpu->cbOpcode = cbNew;
1051 return VINF_SUCCESS;
1052 }
1053#endif
1054
1055 /*
1056 * Read the bytes at this address.
1057 */
1058 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1059#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1060 size_t cbActual;
1061 if ( PATMIsEnabled(pVM)
1062 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1063 {
1064 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1065 Assert(cbActual > 0);
1066 pIemCpu->cbOpcode = (uint8_t)cbActual;
1067 }
1068 else
1069#endif
1070 {
1071 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1072 if (cbToTryRead > cbLeftOnPage)
1073 cbToTryRead = cbLeftOnPage;
1074 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1075 cbToTryRead = sizeof(pIemCpu->abOpcode);
1076
1077 if (!pIemCpu->fBypassHandlers)
1078 {
1079 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1080 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1081 { /* likely */ }
1082 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1083 {
1084 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1085 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1086 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1087 }
1088 else
1089 {
1090 Log((RT_SUCCESS(rcStrict)
1091 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1092 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1093 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1094 return rcStrict;
1095 }
1096 }
1097 else
1098 {
1099 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1100 if (RT_SUCCESS(rc))
1101 { /* likely */ }
1102 else
1103 {
1104 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1105 GCPtrPC, GCPhys, cbToTryRead, rc));
1106 return rc;
1107 }
1108 }
1109 pIemCpu->cbOpcode = cbToTryRead;
1110 }
1111
1112 return VINF_SUCCESS;
1113}
1114
1115
1116/**
1117 * Tries to fetch at least @a cbMin additional opcode bytes, raising the
1118 * appropriate exception if it fails.
1119 *
1120 * @returns Strict VBox status code.
1121 * @param pIemCpu The IEM state.
1122 * @param cbMin The minimum number of bytes relative to offOpcode
1123 * that must be read.
1124 */
1125IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1126{
1127 /*
1128 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1129 *
1130 * First translate CS:rIP to a physical address.
1131 */
1132 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1133 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1134 uint32_t cbToTryRead;
1135 RTGCPTR GCPtrNext;
1136 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1137 {
1138 cbToTryRead = PAGE_SIZE;
1139 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1140 if (!IEM_IS_CANONICAL(GCPtrNext))
1141 return iemRaiseGeneralProtectionFault0(pIemCpu);
1142 }
1143 else
1144 {
1145 uint32_t GCPtrNext32 = pCtx->eip;
1146 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1147 GCPtrNext32 += pIemCpu->cbOpcode;
1148 if (GCPtrNext32 > pCtx->cs.u32Limit)
1149 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1150 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1151 if (!cbToTryRead) /* overflowed */
1152 {
1153 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1154 cbToTryRead = UINT32_MAX;
1155 /** @todo check out wrapping around the code segment. */
1156 }
1157 if (cbToTryRead < cbMin - cbLeft)
1158 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1159 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1160 }
1161
1162 /* Only read up to the end of the page, and make sure we don't read more
1163 than the opcode buffer can hold. */
1164 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1165 if (cbToTryRead > cbLeftOnPage)
1166 cbToTryRead = cbLeftOnPage;
1167 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1168 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1169/** @todo r=bird: Convert assertion into undefined opcode exception? */
1170 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1171
1172#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1173 /* Allow interpretation of patch manager code blocks since they can for
1174 instance throw #PFs for perfectly good reasons. */
1175 if (pIemCpu->fInPatchCode)
1176 {
1177 size_t cbRead = 0;
1178 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1179 AssertRCReturn(rc, rc);
1180 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1181 return VINF_SUCCESS;
1182 }
1183#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1184
1185 RTGCPHYS GCPhys;
1186 uint64_t fFlags;
1187 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1188 if (RT_FAILURE(rc))
1189 {
1190 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1191 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1192 }
1193 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1194 {
1195 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1196 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1197 }
1198 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1199 {
1200 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1201 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1202 }
1203 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1204 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1205 /** @todo Check reserved bits and such stuff. PGM is better at doing
1206 * that, so do it when implementing the guest virtual address
1207 * TLB... */
1208
1209 /*
1210 * Read the bytes at this address.
1211 *
1212 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1213 * and since PATM should only patch the start of an instruction there
1214 * should be no need to check again here.
1215 */
1216 if (!pIemCpu->fBypassHandlers)
1217 {
1218 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1219 cbToTryRead, PGMACCESSORIGIN_IEM);
1220 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1221 { /* likely */ }
1222 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1223 {
1224 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1225 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1226 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1227 }
1228 else
1229 {
1230 Log((RT_SUCCESS(rcStrict)
1231 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1232 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1233 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1234 return rcStrict;
1235 }
1236 }
1237 else
1238 {
1239 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1240 if (RT_SUCCESS(rc))
1241 { /* likely */ }
1242 else
1243 {
1244 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1245 return rc;
1246 }
1247 }
1248 pIemCpu->cbOpcode += cbToTryRead;
1249 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1250
1251 return VINF_SUCCESS;
1252}
1253
1254
1255/**
1256 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1257 *
1258 * @returns Strict VBox status code.
1259 * @param pIemCpu The IEM state.
1260 * @param pb Where to return the opcode byte.
1261 */
1262DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1263{
1264 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1265 if (rcStrict == VINF_SUCCESS)
1266 {
1267 uint8_t offOpcode = pIemCpu->offOpcode;
1268 *pb = pIemCpu->abOpcode[offOpcode];
1269 pIemCpu->offOpcode = offOpcode + 1;
1270 }
1271 else
1272 *pb = 0;
1273 return rcStrict;
1274}
1275
1276
1277/**
1278 * Fetches the next opcode byte.
1279 *
1280 * @returns Strict VBox status code.
1281 * @param pIemCpu The IEM state.
1282 * @param pu8 Where to return the opcode byte.
1283 */
1284DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1285{
1286 uint8_t const offOpcode = pIemCpu->offOpcode;
1287 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1288 {
1289 *pu8 = pIemCpu->abOpcode[offOpcode];
1290 pIemCpu->offOpcode = offOpcode + 1;
1291 return VINF_SUCCESS;
1292 }
1293 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1294}
1295
1296
1297/**
1298 * Fetches the next opcode byte, returns automatically on failure.
1299 *
1300 * @param a_pu8 Where to return the opcode byte.
1301 * @remark Implicitly references pIemCpu.
1302 */
1303#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1304 do \
1305 { \
1306 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1307 if (rcStrict2 != VINF_SUCCESS) \
1308 return rcStrict2; \
1309 } while (0)
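/** Illustrative decode sequence using the fetch macros (the variable names
 * are made up; IEM_OPCODE_GET_NEXT_S8 is defined just below):
 * @code
 *  uint8_t bRm;
 *  IEM_OPCODE_GET_NEXT_U8(&bRm);      // ModR/M byte
 *  int8_t  i8Disp;
 *  IEM_OPCODE_GET_NEXT_S8(&i8Disp);   // 8-bit displacement
 * @endcode
 * Each macro returns from the calling decoder function on failure, so the
 * call sites need no explicit status handling.
 */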
1310
1311
1312/**
1313 * Fetches the next signed byte from the opcode stream.
1314 *
1315 * @returns Strict VBox status code.
1316 * @param pIemCpu The IEM state.
1317 * @param pi8 Where to return the signed byte.
1318 */
1319DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1320{
1321 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1322}
1323
1324
1325/**
1326 * Fetches the next signed byte from the opcode stream, returning automatically
1327 * on failure.
1328 *
1329 * @param a_pi8 Where to return the signed byte.
1330 * @remark Implicitly references pIemCpu.
1331 */
1332#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1333 do \
1334 { \
1335 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1336 if (rcStrict2 != VINF_SUCCESS) \
1337 return rcStrict2; \
1338 } while (0)
1339
1340
1341/**
1342 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1343 *
1344 * @returns Strict VBox status code.
1345 * @param pIemCpu The IEM state.
1346 * @param pu16 Where to return the opcode word.
1347 */
1348DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1349{
1350 uint8_t u8;
1351 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1352 if (rcStrict == VINF_SUCCESS)
1353 *pu16 = (int8_t)u8;
1354 return rcStrict;
1355}
1356
1357
1358/**
1359 * Fetches the next signed byte from the opcode stream, extending it to
1360 * unsigned 16-bit.
1361 *
1362 * @returns Strict VBox status code.
1363 * @param pIemCpu The IEM state.
1364 * @param pu16 Where to return the unsigned word.
1365 */
1366DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1367{
1368 uint8_t const offOpcode = pIemCpu->offOpcode;
1369 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1370 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1371
1372 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1373 pIemCpu->offOpcode = offOpcode + 1;
1374 return VINF_SUCCESS;
1375}
1376
1377
1378/**
1379 * Fetches the next signed byte from the opcode stream and sign-extends it to
1380 * a word, returning automatically on failure.
1381 *
1382 * @param a_pu16 Where to return the word.
1383 * @remark Implicitly references pIemCpu.
1384 */
1385#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1386 do \
1387 { \
1388 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1389 if (rcStrict2 != VINF_SUCCESS) \
1390 return rcStrict2; \
1391 } while (0)
1392
1393
1394/**
1395 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1396 *
1397 * @returns Strict VBox status code.
1398 * @param pIemCpu The IEM state.
1399 * @param pu32 Where to return the opcode dword.
1400 */
1401DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1402{
1403 uint8_t u8;
1404 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1405 if (rcStrict == VINF_SUCCESS)
1406 *pu32 = (int8_t)u8;
1407 return rcStrict;
1408}
1409
1410
1411/**
1412 * Fetches the next signed byte from the opcode stream, extending it to
1413 * unsigned 32-bit.
1414 *
1415 * @returns Strict VBox status code.
1416 * @param pIemCpu The IEM state.
1417 * @param pu32 Where to return the unsigned dword.
1418 */
1419DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1420{
1421 uint8_t const offOpcode = pIemCpu->offOpcode;
1422 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1423 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1424
1425 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1426 pIemCpu->offOpcode = offOpcode + 1;
1427 return VINF_SUCCESS;
1428}
1429
1430
1431/**
1432 * Fetches the next signed byte from the opcode stream and sign-extends it to
1433 * a double word, returning automatically on failure.
1434 *
1435 * @param a_pu32 Where to return the double word.
1436 * @remark Implicitly references pIemCpu.
1437 */
1438#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1439 do \
1440 { \
1441 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1442 if (rcStrict2 != VINF_SUCCESS) \
1443 return rcStrict2; \
1444 } while (0)
1445
1446
1447/**
1448 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1449 *
1450 * @returns Strict VBox status code.
1451 * @param pIemCpu The IEM state.
1452 * @param pu64 Where to return the opcode qword.
1453 */
1454DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1455{
1456 uint8_t u8;
1457 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1458 if (rcStrict == VINF_SUCCESS)
1459 *pu64 = (int8_t)u8;
1460 return rcStrict;
1461}
1462
1463
1464/**
1465 * Fetches the next signed byte from the opcode stream, extending it to
1466 * unsigned 64-bit.
1467 *
1468 * @returns Strict VBox status code.
1469 * @param pIemCpu The IEM state.
1470 * @param pu64 Where to return the unsigned qword.
1471 */
1472DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1473{
1474 uint8_t const offOpcode = pIemCpu->offOpcode;
1475 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1476 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1477
1478 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1479 pIemCpu->offOpcode = offOpcode + 1;
1480 return VINF_SUCCESS;
1481}
1482
1483
1484/**
1485 * Fetches the next signed byte from the opcode stream and sign-extends it to
1486 * a quad word, returning automatically on failure.
1487 *
1488 * @param a_pu64 Where to return the quad word.
1489 * @remark Implicitly references pIemCpu.
1490 */
1491#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1492 do \
1493 { \
1494 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1495 if (rcStrict2 != VINF_SUCCESS) \
1496 return rcStrict2; \
1497 } while (0)
1498
1499
1500/**
1501 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1502 *
1503 * @returns Strict VBox status code.
1504 * @param pIemCpu The IEM state.
1505 * @param pu16 Where to return the opcode word.
1506 */
1507DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1508{
1509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1510 if (rcStrict == VINF_SUCCESS)
1511 {
1512 uint8_t offOpcode = pIemCpu->offOpcode;
1513 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1514 pIemCpu->offOpcode = offOpcode + 2;
1515 }
1516 else
1517 *pu16 = 0;
1518 return rcStrict;
1519}
1520
1521
1522/**
1523 * Fetches the next opcode word.
1524 *
1525 * @returns Strict VBox status code.
1526 * @param pIemCpu The IEM state.
1527 * @param pu16 Where to return the opcode word.
1528 */
1529DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1530{
1531 uint8_t const offOpcode = pIemCpu->offOpcode;
1532 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1533 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1534
1535 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1536 pIemCpu->offOpcode = offOpcode + 2;
1537 return VINF_SUCCESS;
1538}
1539
1540
1541/**
1542 * Fetches the next opcode word, returns automatically on failure.
1543 *
1544 * @param a_pu16 Where to return the opcode word.
1545 * @remark Implicitly references pIemCpu.
1546 */
1547#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1548 do \
1549 { \
1550 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1551 if (rcStrict2 != VINF_SUCCESS) \
1552 return rcStrict2; \
1553 } while (0)
1554
1555
1556/**
1557 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1558 *
1559 * @returns Strict VBox status code.
1560 * @param pIemCpu The IEM state.
1561 * @param pu32 Where to return the opcode double word.
1562 */
1563DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1564{
1565 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1566 if (rcStrict == VINF_SUCCESS)
1567 {
1568 uint8_t offOpcode = pIemCpu->offOpcode;
1569 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1570 pIemCpu->offOpcode = offOpcode + 2;
1571 }
1572 else
1573 *pu32 = 0;
1574 return rcStrict;
1575}
1576
1577
1578/**
1579 * Fetches the next opcode word, zero extending it to a double word.
1580 *
1581 * @returns Strict VBox status code.
1582 * @param pIemCpu The IEM state.
1583 * @param pu32 Where to return the opcode double word.
1584 */
1585DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1586{
1587 uint8_t const offOpcode = pIemCpu->offOpcode;
1588 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1589 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1590
1591 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1592 pIemCpu->offOpcode = offOpcode + 2;
1593 return VINF_SUCCESS;
1594}
1595
1596
1597/**
1598 * Fetches the next opcode word and zero extends it to a double word, returns
1599 * automatically on failure.
1600 *
1601 * @param a_pu32 Where to return the opcode double word.
1602 * @remark Implicitly references pIemCpu.
1603 */
1604#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1605 do \
1606 { \
1607 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1608 if (rcStrict2 != VINF_SUCCESS) \
1609 return rcStrict2; \
1610 } while (0)
1611
1612
1613/**
1614 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1615 *
1616 * @returns Strict VBox status code.
1617 * @param pIemCpu The IEM state.
1618 * @param pu64 Where to return the opcode quad word.
1619 */
1620DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1621{
1622 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1623 if (rcStrict == VINF_SUCCESS)
1624 {
1625 uint8_t offOpcode = pIemCpu->offOpcode;
1626 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1627 pIemCpu->offOpcode = offOpcode + 2;
1628 }
1629 else
1630 *pu64 = 0;
1631 return rcStrict;
1632}
1633
1634
1635/**
1636 * Fetches the next opcode word, zero extending it to a quad word.
1637 *
1638 * @returns Strict VBox status code.
1639 * @param pIemCpu The IEM state.
1640 * @param pu64 Where to return the opcode quad word.
1641 */
1642DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1643{
1644 uint8_t const offOpcode = pIemCpu->offOpcode;
1645 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1646 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1647
1648 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1649 pIemCpu->offOpcode = offOpcode + 2;
1650 return VINF_SUCCESS;
1651}
1652
1653
1654/**
1655 * Fetches the next opcode word and zero extends it to a quad word, returns
1656 * automatically on failure.
1657 *
1658 * @param a_pu64 Where to return the opcode quad word.
1659 * @remark Implicitly references pIemCpu.
1660 */
1661#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1662 do \
1663 { \
1664 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1665 if (rcStrict2 != VINF_SUCCESS) \
1666 return rcStrict2; \
1667 } while (0)
1668
1669
1670/**
1671 * Fetches the next signed word from the opcode stream.
1672 *
1673 * @returns Strict VBox status code.
1674 * @param pIemCpu The IEM state.
1675 * @param pi16 Where to return the signed word.
1676 */
1677DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1678{
1679 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1680}
1681
1682
1683/**
1684 * Fetches the next signed word from the opcode stream, returning automatically
1685 * on failure.
1686 *
1687 * @param a_pi16 Where to return the signed word.
1688 * @remark Implicitly references pIemCpu.
1689 */
1690#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1691 do \
1692 { \
1693 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1694 if (rcStrict2 != VINF_SUCCESS) \
1695 return rcStrict2; \
1696 } while (0)
1697
1698
1699/**
1700 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1701 *
1702 * @returns Strict VBox status code.
1703 * @param pIemCpu The IEM state.
1704 * @param pu32 Where to return the opcode dword.
1705 */
1706DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1707{
1708 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1709 if (rcStrict == VINF_SUCCESS)
1710 {
1711 uint8_t offOpcode = pIemCpu->offOpcode;
1712 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1713 pIemCpu->abOpcode[offOpcode + 1],
1714 pIemCpu->abOpcode[offOpcode + 2],
1715 pIemCpu->abOpcode[offOpcode + 3]);
1716 pIemCpu->offOpcode = offOpcode + 4;
1717 }
1718 else
1719 *pu32 = 0;
1720 return rcStrict;
1721}
1722
1723
1724/**
1725 * Fetches the next opcode dword.
1726 *
1727 * @returns Strict VBox status code.
1728 * @param pIemCpu The IEM state.
1729 * @param pu32 Where to return the opcode double word.
1730 */
1731DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1732{
1733 uint8_t const offOpcode = pIemCpu->offOpcode;
1734 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1735 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1736
1737 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1738 pIemCpu->abOpcode[offOpcode + 1],
1739 pIemCpu->abOpcode[offOpcode + 2],
1740 pIemCpu->abOpcode[offOpcode + 3]);
1741 pIemCpu->offOpcode = offOpcode + 4;
1742 return VINF_SUCCESS;
1743}
1744
1745
1746/**
1747 * Fetches the next opcode dword, returns automatically on failure.
1748 *
1749 * @param a_pu32 Where to return the opcode dword.
1750 * @remark Implicitly references pIemCpu.
1751 */
1752#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1753 do \
1754 { \
1755 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1756 if (rcStrict2 != VINF_SUCCESS) \
1757 return rcStrict2; \
1758 } while (0)
1759
1760
1761/**
1762 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1763 *
1764 * @returns Strict VBox status code.
1765 * @param pIemCpu The IEM state.
1766 * @param pu64 Where to return the opcode quad word.
1767 */
1768DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1769{
1770 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1771 if (rcStrict == VINF_SUCCESS)
1772 {
1773 uint8_t offOpcode = pIemCpu->offOpcode;
1774 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1775 pIemCpu->abOpcode[offOpcode + 1],
1776 pIemCpu->abOpcode[offOpcode + 2],
1777 pIemCpu->abOpcode[offOpcode + 3]);
1778 pIemCpu->offOpcode = offOpcode + 4;
1779 }
1780 else
1781 *pu64 = 0;
1782 return rcStrict;
1783}
1784
1785
1786/**
1787 * Fetches the next opcode dword, zero extending it to a quad word.
1788 *
1789 * @returns Strict VBox status code.
1790 * @param pIemCpu The IEM state.
1791 * @param pu64 Where to return the opcode quad word.
1792 */
1793DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1794{
1795 uint8_t const offOpcode = pIemCpu->offOpcode;
1796 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1797 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1798
1799 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1800 pIemCpu->abOpcode[offOpcode + 1],
1801 pIemCpu->abOpcode[offOpcode + 2],
1802 pIemCpu->abOpcode[offOpcode + 3]);
1803 pIemCpu->offOpcode = offOpcode + 4;
1804 return VINF_SUCCESS;
1805}
1806
1807
1808/**
1809 * Fetches the next opcode dword and zero extends it to a quad word, returns
1810 * automatically on failure.
1811 *
1812 * @param a_pu64 Where to return the opcode quad word.
1813 * @remark Implicitly references pIemCpu.
1814 */
1815#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1816 do \
1817 { \
1818 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1819 if (rcStrict2 != VINF_SUCCESS) \
1820 return rcStrict2; \
1821 } while (0)
1822
1823
1824/**
1825 * Fetches the next signed double word from the opcode stream.
1826 *
1827 * @returns Strict VBox status code.
1828 * @param pIemCpu The IEM state.
1829 * @param pi32 Where to return the signed double word.
1830 */
1831DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1832{
1833 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1834}
1835
1836/**
1837 * Fetches the next signed double word from the opcode stream, returning
1838 * automatically on failure.
1839 *
1840 * @param a_pi32 Where to return the signed double word.
1841 * @remark Implicitly references pIemCpu.
1842 */
1843#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1844 do \
1845 { \
1846 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1847 if (rcStrict2 != VINF_SUCCESS) \
1848 return rcStrict2; \
1849 } while (0)
1850
1851
1852/**
1853 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1854 *
1855 * @returns Strict VBox status code.
1856 * @param pIemCpu The IEM state.
1857 * @param pu64 Where to return the opcode qword.
1858 */
1859DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1860{
1861 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1862 if (rcStrict == VINF_SUCCESS)
1863 {
1864 uint8_t offOpcode = pIemCpu->offOpcode;
1865 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1866 pIemCpu->abOpcode[offOpcode + 1],
1867 pIemCpu->abOpcode[offOpcode + 2],
1868 pIemCpu->abOpcode[offOpcode + 3]);
1869 pIemCpu->offOpcode = offOpcode + 4;
1870 }
1871 else
1872 *pu64 = 0;
1873 return rcStrict;
1874}
1875
1876
1877/**
1878 * Fetches the next opcode dword, sign extending it into a quad word.
1879 *
1880 * @returns Strict VBox status code.
1881 * @param pIemCpu The IEM state.
1882 * @param pu64 Where to return the opcode quad word.
1883 */
1884DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1885{
1886 uint8_t const offOpcode = pIemCpu->offOpcode;
1887 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1888 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1889
1890 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1891 pIemCpu->abOpcode[offOpcode + 1],
1892 pIemCpu->abOpcode[offOpcode + 2],
1893 pIemCpu->abOpcode[offOpcode + 3]);
1894 *pu64 = i32;
1895 pIemCpu->offOpcode = offOpcode + 4;
1896 return VINF_SUCCESS;
1897}
1898
1899
1900/**
1901 * Fetches the next opcode double word and sign extends it to a quad word,
1902 * returns automatically on failure.
1903 *
1904 * @param a_pu64 Where to return the opcode quad word.
1905 * @remark Implicitly references pIemCpu.
1906 */
1907#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1908 do \
1909 { \
1910 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1911 if (rcStrict2 != VINF_SUCCESS) \
1912 return rcStrict2; \
1913 } while (0)
1914
1915
1916/**
1917 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1918 *
1919 * @returns Strict VBox status code.
1920 * @param pIemCpu The IEM state.
1921 * @param pu64 Where to return the opcode qword.
1922 */
1923DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1924{
1925 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1926 if (rcStrict == VINF_SUCCESS)
1927 {
1928 uint8_t offOpcode = pIemCpu->offOpcode;
1929 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1930 pIemCpu->abOpcode[offOpcode + 1],
1931 pIemCpu->abOpcode[offOpcode + 2],
1932 pIemCpu->abOpcode[offOpcode + 3],
1933 pIemCpu->abOpcode[offOpcode + 4],
1934 pIemCpu->abOpcode[offOpcode + 5],
1935 pIemCpu->abOpcode[offOpcode + 6],
1936 pIemCpu->abOpcode[offOpcode + 7]);
1937 pIemCpu->offOpcode = offOpcode + 8;
1938 }
1939 else
1940 *pu64 = 0;
1941 return rcStrict;
1942}
1943
1944
1945/**
1946 * Fetches the next opcode qword.
1947 *
1948 * @returns Strict VBox status code.
1949 * @param pIemCpu The IEM state.
1950 * @param pu64 Where to return the opcode qword.
1951 */
1952DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1953{
1954 uint8_t const offOpcode = pIemCpu->offOpcode;
1955 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1956 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1957
1958 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1959 pIemCpu->abOpcode[offOpcode + 1],
1960 pIemCpu->abOpcode[offOpcode + 2],
1961 pIemCpu->abOpcode[offOpcode + 3],
1962 pIemCpu->abOpcode[offOpcode + 4],
1963 pIemCpu->abOpcode[offOpcode + 5],
1964 pIemCpu->abOpcode[offOpcode + 6],
1965 pIemCpu->abOpcode[offOpcode + 7]);
1966 pIemCpu->offOpcode = offOpcode + 8;
1967 return VINF_SUCCESS;
1968}
1969
1970
1971/**
1972 * Fetches the next opcode quad word, returns automatically on failure.
1973 *
1974 * @param a_pu64 Where to return the opcode quad word.
1975 * @remark Implicitly references pIemCpu.
1976 */
1977#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1978 do \
1979 { \
1980 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1981 if (rcStrict2 != VINF_SUCCESS) \
1982 return rcStrict2; \
1983 } while (0)
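/*
 * Illustrative sketch, not part of the original source: how a decoder routine
 * is expected to consume the IEM_OPCODE_GET_NEXT_XXX macros defined above.
 * The function name and the operands it fetches are hypothetical; the point is
 * that each macro needs pIemCpu in scope and an enclosing function returning
 * VBOXSTRICTRC, since the macros return the strict status code themselves on a
 * failed opcode fetch.
 */
#if 0 /* example only, not built */
IEM_STATIC VBOXSTRICTRC iemExampleDecodeOperands(PIEMCPU pIemCpu)
{
    uint16_t u16Imm;    /* word-sized immediate operand */
    uint64_t u64Disp;   /* dword displacement, sign extended for 64-bit addressing */

    IEM_OPCODE_GET_NEXT_U16(&u16Imm);          /* returns on fetch failure */
    IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Disp);  /* ditto */

    /* ... operand handling would go here ... */
    NOREF(u16Imm); NOREF(u64Disp);
    return VINF_SUCCESS;
}
#endif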
1984
1985
1986/** @name Misc Worker Functions.
1987 * @{
1988 */
1989
1990
1991/**
1992 * Validates a new SS segment.
1993 *
1994 * @returns VBox strict status code.
1995 * @param pIemCpu The IEM per CPU instance data.
1996 * @param pCtx The CPU context.
1997 * @param NewSS The new SS selector.
1998 * @param uCpl The CPL to load the stack for.
1999 * @param pDesc Where to return the descriptor.
2000 */
2001IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
2002{
2003 NOREF(pCtx);
2004
2005 /* Null selectors are not allowed (we're not called for dispatching
2006 interrupts with SS=0 in long mode). */
2007 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2008 {
2009 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2010 return iemRaiseTaskSwitchFault0(pIemCpu);
2011 }
2012
2013 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2014 if ((NewSS & X86_SEL_RPL) != uCpl)
2015 {
2016 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2017 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2018 }
2019
2020 /*
2021 * Read the descriptor.
2022 */
2023 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
2024 if (rcStrict != VINF_SUCCESS)
2025 return rcStrict;
2026
2027 /*
2028 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2029 */
2030 if (!pDesc->Legacy.Gen.u1DescType)
2031 {
2032 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2033 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2034 }
2035
2036 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2037 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2038 {
2039 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2040 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2041 }
2042 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2043 {
2044 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2045 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2046 }
2047
2048 /* Is it there? */
2049 /** @todo testcase: Is this checked before the canonical / limit check below? */
2050 if (!pDesc->Legacy.Gen.u1Present)
2051 {
2052 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2053 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2054 }
2055
2056 return VINF_SUCCESS;
2057}
2058
2059
2060/**
2061 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2062 * not.
2063 *
2064 * @param a_pIemCpu The IEM per CPU data.
2065 * @param a_pCtx The CPU context.
2066 */
2067#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2068# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2069 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2070 ? (a_pCtx)->eflags.u \
2071 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2072#else
2073# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2074 ( (a_pCtx)->eflags.u )
2075#endif
2076
2077/**
2078 * Updates the EFLAGS in the correct manner wrt. PATM.
2079 *
2080 * @param a_pIemCpu The IEM per CPU data.
2081 * @param a_pCtx The CPU context.
2082 * @param a_fEfl The new EFLAGS.
2083 */
2084#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2085# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2086 do { \
2087 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2088 (a_pCtx)->eflags.u = (a_fEfl); \
2089 else \
2090 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2091 } while (0)
2092#else
2093# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2094 do { \
2095 (a_pCtx)->eflags.u = (a_fEfl); \
2096 } while (0)
2097#endif
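/*
 * Illustrative sketch, not part of the original source: the intended
 * read-modify-write pattern for IEMMISC_GET_EFL / IEMMISC_SET_EFL, mirroring
 * what the exception dispatch code below does.  In raw-mode builds the macros
 * go through CPUMRawGetEFlags/CPUMRawSetEFlags, so flag updates should not
 * poke pCtx->eflags.u directly.  The helper name is hypothetical.
 */
#if 0 /* example only, not built */
static void iemExampleMaskInterrupts(PIEMCPU pIemCpu, PCPUMCTX pCtx)
{
    uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx); /* PATM-aware read */
    fEfl &= ~X86_EFL_IF;                            /* clear the interrupt flag */
    IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);           /* PATM-aware write back */
}
#endif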
2098
2099
2100/** @} */
2101
2102/** @name Raising Exceptions.
2103 *
2104 * @{
2105 */
2106
2107/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2108 * @{ */
2109/** CPU exception. */
2110#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2111/** External interrupt (from PIC, APIC, whatever). */
2112#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2113/** Software interrupt (int or into, not bound).
2114 * Returns to the following instruction */
2115#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2116/** Takes an error code. */
2117#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2118/** Takes a CR2. */
2119#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2120/** Generated by the breakpoint instruction. */
2121#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2122/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2123#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2124/** @} */
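/*
 * Illustrative sketch, not part of the original source: typical combinations
 * of the IEM_XCPT_FLAGS_XXX bits handed to iemRaiseXcptOrInt.  A page fault
 * is a CPU exception carrying both an error code and a CR2 value; a software
 * interrupt carries neither and resumes at the following instruction.  The
 * variable names are hypothetical.
 */
#if 0 /* example only, not built */
static const uint32_t g_fXcptFlagsPageFaultExample = IEM_XCPT_FLAGS_T_CPU_XCPT
                                                   | IEM_XCPT_FLAGS_ERR
                                                   | IEM_XCPT_FLAGS_CR2;
static const uint32_t g_fXcptFlagsSoftIntExample   = IEM_XCPT_FLAGS_T_SOFT_INT;
#endif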
2125
2126
2127/**
2128 * Loads the specified stack far pointer from the TSS.
2129 *
2130 * @returns VBox strict status code.
2131 * @param pIemCpu The IEM per CPU instance data.
2132 * @param pCtx The CPU context.
2133 * @param uCpl The CPL to load the stack for.
2134 * @param pSelSS Where to return the new stack segment.
2135 * @param puEsp Where to return the new stack pointer.
2136 */
2137IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2138 PRTSEL pSelSS, uint32_t *puEsp)
2139{
2140 VBOXSTRICTRC rcStrict;
2141 Assert(uCpl < 4);
2142
2143 switch (pCtx->tr.Attr.n.u4Type)
2144 {
2145 /*
2146 * 16-bit TSS (X86TSS16).
2147 */
2148 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2149 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2150 {
2151 uint32_t off = uCpl * 4 + 2;
2152 if (off + 4 <= pCtx->tr.u32Limit)
2153 {
2154 /** @todo check actual access pattern here. */
2155 uint32_t u32Tmp = 0; /* gcc maybe... */
2156 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2157 if (rcStrict == VINF_SUCCESS)
2158 {
2159 *puEsp = RT_LOWORD(u32Tmp);
2160 *pSelSS = RT_HIWORD(u32Tmp);
2161 return VINF_SUCCESS;
2162 }
2163 }
2164 else
2165 {
2166 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2167 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2168 }
2169 break;
2170 }
2171
2172 /*
2173 * 32-bit TSS (X86TSS32).
2174 */
2175 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2176 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2177 {
2178 uint32_t off = uCpl * 8 + 4;
2179 if (off + 7 <= pCtx->tr.u32Limit)
2180 {
2181/** @todo check actual access pattern here. */
2182 uint64_t u64Tmp;
2183 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2184 if (rcStrict == VINF_SUCCESS)
2185 {
2186 *puEsp = u64Tmp & UINT32_MAX;
2187 *pSelSS = (RTSEL)(u64Tmp >> 32);
2188 return VINF_SUCCESS;
2189 }
2190 }
2191 else
2192 {
2193 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2194 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2195 }
2196 break;
2197 }
2198
2199 default:
2200 AssertFailed();
2201 rcStrict = VERR_IEM_IPE_4;
2202 break;
2203 }
2204
2205 *puEsp = 0; /* make gcc happy */
2206 *pSelSS = 0; /* make gcc happy */
2207 return rcStrict;
2208}
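/*
 * Illustrative note, not part of the original source: the offset computations
 * above follow the architectural TSS layouts.  In the 16-bit TSS the {sp, ss}
 * pairs start at offset 2 and are 4 bytes apart (off = uCpl * 4 + 2); in the
 * 32-bit TSS the {esp, ss} pairs start at offset 4 and are 8 bytes apart
 * (off = uCpl * 8 + 4).
 */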
2209
2210
2211/**
2212 * Loads the specified stack pointer from the 64-bit TSS.
2213 *
2214 * @returns VBox strict status code.
2215 * @param pIemCpu The IEM per CPU instance data.
2216 * @param pCtx The CPU context.
2217 * @param uCpl The CPL to load the stack for.
2218 * @param uIst The interrupt stack table index, 0 to use uCpl.
2219 * @param puRsp Where to return the new stack pointer.
2220 */
2221IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2222{
2223 Assert(uCpl < 4);
2224 Assert(uIst < 8);
2225 *puRsp = 0; /* make gcc happy */
2226
2227 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2228
2229 uint32_t off;
2230 if (uIst)
2231 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2232 else
2233 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2234 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2235 {
2236 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2237 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2238 }
2239
2240 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2241}
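/*
 * Illustrative note, not part of the original source: assuming the standard
 * AMD64 TSS layout (rsp0 at offset 0x04, ist1 at offset 0x24), the lookup
 * above resolves to:
 *      uIst == 0:    off = uCpl * 8 + 0x04        -> rsp0/rsp1/rsp2 at 0x04/0x0c/0x14
 *      uIst == 1..7: off = (uIst - 1) * 8 + 0x24  -> ist1..ist7 at 0x24..0x54
 */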
2242
2243
2244/**
2245 * Adjust the CPU state according to the exception being raised.
2246 *
2247 * @param pCtx The CPU context.
2248 * @param u8Vector The exception that has been raised.
2249 */
2250DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2251{
2252 switch (u8Vector)
2253 {
2254 case X86_XCPT_DB:
2255 pCtx->dr[7] &= ~X86_DR7_GD;
2256 break;
2257 /** @todo Read the AMD and Intel exception reference... */
2258 }
2259}
2260
2261
2262/**
2263 * Implements exceptions and interrupts for real mode.
2264 *
2265 * @returns VBox strict status code.
2266 * @param pIemCpu The IEM per CPU instance data.
2267 * @param pCtx The CPU context.
2268 * @param cbInstr The number of bytes to offset rIP by in the return
2269 * address.
2270 * @param u8Vector The interrupt / exception vector number.
2271 * @param fFlags The flags.
2272 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2273 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2274 */
2275IEM_STATIC VBOXSTRICTRC
2276iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2277 PCPUMCTX pCtx,
2278 uint8_t cbInstr,
2279 uint8_t u8Vector,
2280 uint32_t fFlags,
2281 uint16_t uErr,
2282 uint64_t uCr2)
2283{
2284 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2285 NOREF(uErr); NOREF(uCr2);
2286
2287 /*
2288 * Read the IDT entry.
2289 */
2290 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2291 {
2292 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2293 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2294 }
2295 RTFAR16 Idte;
2296 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2297 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2298 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2299 return rcStrict;
2300
2301 /*
2302 * Push the stack frame.
2303 */
2304 uint16_t *pu16Frame;
2305 uint64_t uNewRsp;
2306 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2307 if (rcStrict != VINF_SUCCESS)
2308 return rcStrict;
2309
2310 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2311#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2312 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2313 if (pIemCpu->uTargetCpu <= IEMTARGETCPU_186)
2314 fEfl |= UINT16_C(0xf000);
2315#endif
2316 pu16Frame[2] = (uint16_t)fEfl;
2317 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2318 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2319 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2320 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2321 return rcStrict;
2322
2323 /*
2324 * Load the vector address into cs:ip and make exception specific state
2325 * adjustments.
2326 */
2327 pCtx->cs.Sel = Idte.sel;
2328 pCtx->cs.ValidSel = Idte.sel;
2329 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2330 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2331 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2332 pCtx->rip = Idte.off;
2333 fEfl &= ~X86_EFL_IF;
2334 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2335
2336 /** @todo do we actually do this in real mode? */
2337 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2338 iemRaiseXcptAdjustState(pCtx, u8Vector);
2339
2340 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2341}
2342
2343
2344/**
2345 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2346 *
2347 * @param pIemCpu The IEM per CPU instance data.
2348 * @param pSReg Pointer to the segment register.
2349 */
2350IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2351{
2352 pSReg->Sel = 0;
2353 pSReg->ValidSel = 0;
2354 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2355 {
2356 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2357 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2358 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2359 }
2360 else
2361 {
2362 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2363 /** @todo check this on AMD-V */
2364 pSReg->u64Base = 0;
2365 pSReg->u32Limit = 0;
2366 }
2367}
2368
2369
2370/**
2371 * Loads a segment selector during a task switch in V8086 mode.
2372 *
2373 * @param pIemCpu The IEM per CPU instance data.
2374 * @param pSReg Pointer to the segment register.
2375 * @param uSel The selector value to load.
2376 */
2377IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2378{
2379 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2380 pSReg->Sel = uSel;
2381 pSReg->ValidSel = uSel;
2382 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2383 pSReg->u64Base = uSel << 4;
2384 pSReg->u32Limit = 0xffff;
2385 pSReg->Attr.u = 0xf3;
2386}
2387
2388
2389/**
2390 * Loads a NULL data selector into a selector register, both the hidden and
2391 * visible parts, in protected mode.
2392 *
2393 * @param pIemCpu The IEM state of the calling EMT.
2394 * @param pSReg Pointer to the segment register.
2395 * @param uRpl The RPL.
2396 */
2397IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2398{
2399 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2400 * data selector in protected mode. */
2401 pSReg->Sel = uRpl;
2402 pSReg->ValidSel = uRpl;
2403 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2404 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2405 {
2406 /* VT-x (Intel 3960x) observed doing something like this. */
2407 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2408 pSReg->u32Limit = UINT32_MAX;
2409 pSReg->u64Base = 0;
2410 }
2411 else
2412 {
2413 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2414 pSReg->u32Limit = 0;
2415 pSReg->u64Base = 0;
2416 }
2417}
2418
2419
2420/**
2421 * Loads a segment selector during a task switch in protected mode.
2422 *
2423 * In this task switch scenario, we would throw \#TS exceptions rather than
2424 * \#GPs.
2425 *
2426 * @returns VBox strict status code.
2427 * @param pIemCpu The IEM per CPU instance data.
2428 * @param pSReg Pointer to the segment register.
2429 * @param uSel The new selector value.
2430 *
2431 * @remarks This does _not_ handle CS or SS.
2432 * @remarks This expects pIemCpu->uCpl to be up to date.
2433 */
2434IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2435{
2436 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2437
2438 /* Null data selector. */
2439 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2440 {
2441 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2442 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2443 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2444 return VINF_SUCCESS;
2445 }
2446
2447 /* Fetch the descriptor. */
2448 IEMSELDESC Desc;
2449 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2450 if (rcStrict != VINF_SUCCESS)
2451 {
2452 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2453 VBOXSTRICTRC_VAL(rcStrict)));
2454 return rcStrict;
2455 }
2456
2457 /* Must be a data segment or readable code segment. */
2458 if ( !Desc.Legacy.Gen.u1DescType
2459 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2460 {
2461 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2462 Desc.Legacy.Gen.u4Type));
2463 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2464 }
2465
2466 /* Check privileges for data segments and non-conforming code segments. */
2467 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2468 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2469 {
2470 /* The RPL and the new CPL must be less than or equal to the DPL. */
2471 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2472 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2473 {
2474 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2475 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2476 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2477 }
2478 }
2479
2480 /* Is it there? */
2481 if (!Desc.Legacy.Gen.u1Present)
2482 {
2483 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2484 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2485 }
2486
2487 /* The base and limit. */
2488 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2489 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2490
2491 /*
2492 * Ok, everything checked out fine. Now set the accessed bit before
2493 * committing the result into the registers.
2494 */
2495 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2496 {
2497 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2498 if (rcStrict != VINF_SUCCESS)
2499 return rcStrict;
2500 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2501 }
2502
2503 /* Commit */
2504 pSReg->Sel = uSel;
2505 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2506 pSReg->u32Limit = cbLimit;
2507 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2508 pSReg->ValidSel = uSel;
2509 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2510 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2511 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2512
2513 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2514 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2515 return VINF_SUCCESS;
2516}
2517
2518
2519/**
2520 * Performs a task switch.
2521 *
2522 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2523 * caller is responsible for performing the necessary checks (like DPL, TSS
2524 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2525 * reference for JMP, CALL, IRET.
2526 *
2527 * If the task switch is due to a software interrupt or hardware exception,
2528 * the caller is responsible for validating the TSS selector and descriptor. See
2529 * Intel Instruction reference for INT n.
2530 *
2531 * @returns VBox strict status code.
2532 * @param pIemCpu The IEM per CPU instance data.
2533 * @param pCtx The CPU context.
2534 * @param enmTaskSwitch What caused this task switch.
2535 * @param uNextEip The EIP effective after the task switch.
2536 * @param fFlags The flags.
2537 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2538 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2539 * @param SelTSS The TSS selector of the new task.
2540 * @param pNewDescTSS Pointer to the new TSS descriptor.
2541 */
2542IEM_STATIC VBOXSTRICTRC
2543iemTaskSwitch(PIEMCPU pIemCpu,
2544 PCPUMCTX pCtx,
2545 IEMTASKSWITCH enmTaskSwitch,
2546 uint32_t uNextEip,
2547 uint32_t fFlags,
2548 uint16_t uErr,
2549 uint64_t uCr2,
2550 RTSEL SelTSS,
2551 PIEMSELDESC pNewDescTSS)
2552{
2553 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2554 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2555
2556 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2557 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2558 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2559 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2560 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2561
2562 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2563 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2564
2565 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2566 fIsNewTSS386, pCtx->eip, uNextEip));
2567
2568 /* Update CR2 in case it's a page-fault. */
2569 /** @todo This should probably be done much earlier in IEM/PGM. See
2570 * @bugref{5653#c49}. */
2571 if (fFlags & IEM_XCPT_FLAGS_CR2)
2572 pCtx->cr2 = uCr2;
2573
2574 /*
2575 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2576 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2577 */
2578 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2579 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2580 if (uNewTSSLimit < uNewTSSLimitMin)
2581 {
2582 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2583 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2584 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2585 }
2586
2587 /*
2588 * Check the current TSS limit. The last data written to the current TSS during the
2589 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2590 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2591 *
2592 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2593 * end up with smaller than "legal" TSS limits.
2594 */
2595 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2596 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2597 if (uCurTSSLimit < uCurTSSLimitMin)
2598 {
2599 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2600 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2601 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2602 }
2603
2604 /*
2605 * Verify that the new TSS can be accessed and map it. Map only the required contents
2606 * and not the entire TSS.
2607 */
2608 void *pvNewTSS;
2609 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2610 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2611 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2612 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2613 * not perform correct translation if this happens. See Intel spec. 7.2.1
2614 * "Task-State Segment" */
2615 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2616 if (rcStrict != VINF_SUCCESS)
2617 {
2618 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2619 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2620 return rcStrict;
2621 }
2622
2623 /*
2624 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2625 */
2626 uint32_t u32EFlags = pCtx->eflags.u32;
2627 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2628 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2629 {
2630 PX86DESC pDescCurTSS;
2631 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2632 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2633 if (rcStrict != VINF_SUCCESS)
2634 {
2635 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2636 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2637 return rcStrict;
2638 }
2639
2640 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2641 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2642 if (rcStrict != VINF_SUCCESS)
2643 {
2644 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2645 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2646 return rcStrict;
2647 }
2648
2649 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2650 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2651 {
2652 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2653 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2654 u32EFlags &= ~X86_EFL_NT;
2655 }
2656 }
2657
2658 /*
2659 * Save the CPU state into the current TSS.
2660 */
2661 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2662 if (GCPtrNewTSS == GCPtrCurTSS)
2663 {
2664 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2665 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2666 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2667 }
2668 if (fIsNewTSS386)
2669 {
2670 /*
2671 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2672 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2673 */
2674 void *pvCurTSS32;
2675 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2676 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2677 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2678 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2679 if (rcStrict != VINF_SUCCESS)
2680 {
2681 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2682 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2683 return rcStrict;
2684 }
2685
2686 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2687 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2688 pCurTSS32->eip = uNextEip;
2689 pCurTSS32->eflags = u32EFlags;
2690 pCurTSS32->eax = pCtx->eax;
2691 pCurTSS32->ecx = pCtx->ecx;
2692 pCurTSS32->edx = pCtx->edx;
2693 pCurTSS32->ebx = pCtx->ebx;
2694 pCurTSS32->esp = pCtx->esp;
2695 pCurTSS32->ebp = pCtx->ebp;
2696 pCurTSS32->esi = pCtx->esi;
2697 pCurTSS32->edi = pCtx->edi;
2698 pCurTSS32->es = pCtx->es.Sel;
2699 pCurTSS32->cs = pCtx->cs.Sel;
2700 pCurTSS32->ss = pCtx->ss.Sel;
2701 pCurTSS32->ds = pCtx->ds.Sel;
2702 pCurTSS32->fs = pCtx->fs.Sel;
2703 pCurTSS32->gs = pCtx->gs.Sel;
2704
2705 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2706 if (rcStrict != VINF_SUCCESS)
2707 {
2708 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2709 VBOXSTRICTRC_VAL(rcStrict)));
2710 return rcStrict;
2711 }
2712 }
2713 else
2714 {
2715 /*
2716 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2717 */
2718 void *pvCurTSS16;
2719 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2720 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2721 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2722 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2723 if (rcStrict != VINF_SUCCESS)
2724 {
2725 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2726 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2727 return rcStrict;
2728 }
2729
2730 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2731 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2732 pCurTSS16->ip = uNextEip;
2733 pCurTSS16->flags = u32EFlags;
2734 pCurTSS16->ax = pCtx->ax;
2735 pCurTSS16->cx = pCtx->cx;
2736 pCurTSS16->dx = pCtx->dx;
2737 pCurTSS16->bx = pCtx->bx;
2738 pCurTSS16->sp = pCtx->sp;
2739 pCurTSS16->bp = pCtx->bp;
2740 pCurTSS16->si = pCtx->si;
2741 pCurTSS16->di = pCtx->di;
2742 pCurTSS16->es = pCtx->es.Sel;
2743 pCurTSS16->cs = pCtx->cs.Sel;
2744 pCurTSS16->ss = pCtx->ss.Sel;
2745 pCurTSS16->ds = pCtx->ds.Sel;
2746
2747 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2748 if (rcStrict != VINF_SUCCESS)
2749 {
2750 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2751 VBOXSTRICTRC_VAL(rcStrict)));
2752 return rcStrict;
2753 }
2754 }
2755
2756 /*
2757 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2758 */
2759 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2760 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2761 {
2762 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2763 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2764 pNewTSS->selPrev = pCtx->tr.Sel;
2765 }
2766
2767 /*
2768 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2769 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2770 */
2771 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2772 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2773 bool fNewDebugTrap;
2774 if (fIsNewTSS386)
2775 {
2776 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2777 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2778 uNewEip = pNewTSS32->eip;
2779 uNewEflags = pNewTSS32->eflags;
2780 uNewEax = pNewTSS32->eax;
2781 uNewEcx = pNewTSS32->ecx;
2782 uNewEdx = pNewTSS32->edx;
2783 uNewEbx = pNewTSS32->ebx;
2784 uNewEsp = pNewTSS32->esp;
2785 uNewEbp = pNewTSS32->ebp;
2786 uNewEsi = pNewTSS32->esi;
2787 uNewEdi = pNewTSS32->edi;
2788 uNewES = pNewTSS32->es;
2789 uNewCS = pNewTSS32->cs;
2790 uNewSS = pNewTSS32->ss;
2791 uNewDS = pNewTSS32->ds;
2792 uNewFS = pNewTSS32->fs;
2793 uNewGS = pNewTSS32->gs;
2794 uNewLdt = pNewTSS32->selLdt;
2795 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2796 }
2797 else
2798 {
2799 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2800 uNewCr3 = 0;
2801 uNewEip = pNewTSS16->ip;
2802 uNewEflags = pNewTSS16->flags;
2803 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2804 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2805 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2806 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2807 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2808 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2809 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2810 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2811 uNewES = pNewTSS16->es;
2812 uNewCS = pNewTSS16->cs;
2813 uNewSS = pNewTSS16->ss;
2814 uNewDS = pNewTSS16->ds;
2815 uNewFS = 0;
2816 uNewGS = 0;
2817 uNewLdt = pNewTSS16->selLdt;
2818 fNewDebugTrap = false;
2819 }
2820
2821 if (GCPtrNewTSS == GCPtrCurTSS)
2822 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2823 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2824
2825 /*
2826 * We're done accessing the new TSS.
2827 */
2828 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2829 if (rcStrict != VINF_SUCCESS)
2830 {
2831 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2832 return rcStrict;
2833 }
2834
2835 /*
2836 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2837 */
2838 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2839 {
2840 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2841 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2842 if (rcStrict != VINF_SUCCESS)
2843 {
2844 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2845 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2846 return rcStrict;
2847 }
2848
2849 /* Check that the descriptor indicates the new TSS is available (not busy). */
2850 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2851 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2852 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2853
2854 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2855 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2856 if (rcStrict != VINF_SUCCESS)
2857 {
2858 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2859 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2860 return rcStrict;
2861 }
2862 }
2863
2864 /*
2865 * From this point on, we're technically in the new task. Exceptions raised from here on are
2866 * deferred until the task switch completes, but are delivered before any instruction in the new task executes.
2867 */
2868 pCtx->tr.Sel = SelTSS;
2869 pCtx->tr.ValidSel = SelTSS;
2870 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2871 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2872 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2873 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2874 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2875
2876 /* Set the busy bit in TR. */
2877 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2878 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2879 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2880 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2881 {
2882 uNewEflags |= X86_EFL_NT;
2883 }
2884
2885 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2886 pCtx->cr0 |= X86_CR0_TS;
2887 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2888
2889 pCtx->eip = uNewEip;
2890 pCtx->eax = uNewEax;
2891 pCtx->ecx = uNewEcx;
2892 pCtx->edx = uNewEdx;
2893 pCtx->ebx = uNewEbx;
2894 pCtx->esp = uNewEsp;
2895 pCtx->ebp = uNewEbp;
2896 pCtx->esi = uNewEsi;
2897 pCtx->edi = uNewEdi;
2898
2899 uNewEflags &= X86_EFL_LIVE_MASK;
2900 uNewEflags |= X86_EFL_RA1_MASK;
2901 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2902
2903 /*
2904 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2905 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2906 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2907 */
2908 pCtx->es.Sel = uNewES;
2909 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2910 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2911
2912 pCtx->cs.Sel = uNewCS;
2913 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2914 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2915
2916 pCtx->ss.Sel = uNewSS;
2917 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2918 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2919
2920 pCtx->ds.Sel = uNewDS;
2921 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2922 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2923
2924 pCtx->fs.Sel = uNewFS;
2925 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2926 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2927
2928 pCtx->gs.Sel = uNewGS;
2929 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2930 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2931 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2932
2933 pCtx->ldtr.Sel = uNewLdt;
2934 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2935 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2936 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2937
2938 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2939 {
2940 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2941 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2942 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2943 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2944 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2945 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2946 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2947 }
2948
2949 /*
2950 * Switch CR3 for the new task.
2951 */
2952 if ( fIsNewTSS386
2953 && (pCtx->cr0 & X86_CR0_PG))
2954 {
2955 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2956 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2957 {
2958 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2959 AssertRCSuccessReturn(rc, rc);
2960 }
2961 else
2962 pCtx->cr3 = uNewCr3;
2963
2964 /* Inform PGM. */
2965 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2966 {
2967 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2968 AssertRCReturn(rc, rc);
2969 /* ignore informational status codes */
2970 }
2971 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2972 }
2973
2974 /*
2975 * Switch LDTR for the new task.
2976 */
2977 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2978 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2979 else
2980 {
2981 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2982
2983 IEMSELDESC DescNewLdt;
2984 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2985 if (rcStrict != VINF_SUCCESS)
2986 {
2987 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2988 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2989 return rcStrict;
2990 }
2991 if ( !DescNewLdt.Legacy.Gen.u1Present
2992 || DescNewLdt.Legacy.Gen.u1DescType
2993 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2994 {
2995 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2996 uNewLdt, DescNewLdt.Legacy.u));
2997 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2998 }
2999
3000 pCtx->ldtr.ValidSel = uNewLdt;
3001 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3002 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3003 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3004 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3005 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3006 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3007 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
3008 }
3009
3010 IEMSELDESC DescSS;
3011 if (IEM_IS_V86_MODE(pIemCpu))
3012 {
3013 pIemCpu->uCpl = 3;
3014 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
3015 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
3016 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
3017 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
3018 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
3019 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
3020 }
3021 else
3022 {
3023 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3024
3025 /*
3026 * Load the stack segment for the new task.
3027 */
3028 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3029 {
3030 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3031 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3032 }
3033
3034 /* Fetch the descriptor. */
3035 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
3036 if (rcStrict != VINF_SUCCESS)
3037 {
3038 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3039 VBOXSTRICTRC_VAL(rcStrict)));
3040 return rcStrict;
3041 }
3042
3043 /* SS must be a data segment and writable. */
3044 if ( !DescSS.Legacy.Gen.u1DescType
3045 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3046 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3047 {
3048 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3049 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3050 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3051 }
3052
3053 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3054 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3055 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3056 {
3057 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3058 uNewCpl));
3059 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3060 }
3061
3062 /* Is it there? */
3063 if (!DescSS.Legacy.Gen.u1Present)
3064 {
3065 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3066 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3067 }
3068
3069 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3070 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3071
3072 /* Set the accessed bit before committing the result into SS. */
3073 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3074 {
3075 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3076 if (rcStrict != VINF_SUCCESS)
3077 return rcStrict;
3078 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3079 }
3080
3081 /* Commit SS. */
3082 pCtx->ss.Sel = uNewSS;
3083 pCtx->ss.ValidSel = uNewSS;
3084 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3085 pCtx->ss.u32Limit = cbLimit;
3086 pCtx->ss.u64Base = u64Base;
3087 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3088 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3089
3090 /* CPL has changed, update IEM before loading rest of segments. */
3091 pIemCpu->uCpl = uNewCpl;
3092
3093 /*
3094 * Load the data segments for the new task.
3095 */
3096 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3097 if (rcStrict != VINF_SUCCESS)
3098 return rcStrict;
3099 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3100 if (rcStrict != VINF_SUCCESS)
3101 return rcStrict;
3102 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3103 if (rcStrict != VINF_SUCCESS)
3104 return rcStrict;
3105 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3106 if (rcStrict != VINF_SUCCESS)
3107 return rcStrict;
3108
3109 /*
3110 * Load the code segment for the new task.
3111 */
3112 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3113 {
3114 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3115 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3116 }
3117
3118 /* Fetch the descriptor. */
3119 IEMSELDESC DescCS;
3120 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3121 if (rcStrict != VINF_SUCCESS)
3122 {
3123 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3124 return rcStrict;
3125 }
3126
3127 /* CS must be a code segment. */
3128 if ( !DescCS.Legacy.Gen.u1DescType
3129 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3130 {
3131 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3132 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3133 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3134 }
3135
3136 /* For conforming CS, DPL must be less than or equal to the RPL. */
3137 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3138 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3139 {
3140         Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3141 DescCS.Legacy.Gen.u2Dpl));
3142 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3143 }
3144
3145 /* For non-conforming CS, DPL must match RPL. */
3146 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3147 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3148 {
3149         Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3150 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3151 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3152 }
3153
3154 /* Is it there? */
3155 if (!DescCS.Legacy.Gen.u1Present)
3156 {
3157 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3158 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3159 }
3160
3161 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3162 u64Base = X86DESC_BASE(&DescCS.Legacy);
3163
3164 /* Set the accessed bit before committing the result into CS. */
3165 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3166 {
3167 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3168 if (rcStrict != VINF_SUCCESS)
3169 return rcStrict;
3170 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3171 }
3172
3173 /* Commit CS. */
3174 pCtx->cs.Sel = uNewCS;
3175 pCtx->cs.ValidSel = uNewCS;
3176 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3177 pCtx->cs.u32Limit = cbLimit;
3178 pCtx->cs.u64Base = u64Base;
3179 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3180 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3181 }
3182
3183 /** @todo Debug trap. */
3184 if (fIsNewTSS386 && fNewDebugTrap)
3185 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3186
3187 /*
3188 * Construct the error code masks based on what caused this task switch.
3189 * See Intel Instruction reference for INT.
3190 */
3191 uint16_t uExt;
3192 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3193 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3194 {
3195 uExt = 1;
3196 }
3197 else
3198 uExt = 0;
3199
3200 /*
3201 * Push any error code on to the new stack.
3202 */
3203 if (fFlags & IEM_XCPT_FLAGS_ERR)
3204 {
3205 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3206 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3207 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3208
3209 /* Check that there is sufficient space on the stack. */
3210 /** @todo Factor out segment limit checking for normal/expand down segments
3211 * into a separate function. */
3212 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3213 {
3214 if ( pCtx->esp - 1 > cbLimitSS
3215 || pCtx->esp < cbStackFrame)
3216 {
3217 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3218 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3219 cbStackFrame));
3220 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3221 }
3222 }
3223 else
3224 {
3225 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3226 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3227 {
3228 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3229 cbStackFrame));
3230 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3231 }
3232 }
3233
3234
3235 if (fIsNewTSS386)
3236 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3237 else
3238 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3239 if (rcStrict != VINF_SUCCESS)
3240 {
3241 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3242 VBOXSTRICTRC_VAL(rcStrict)));
3243 return rcStrict;
3244 }
3245 }
3246
3247 /* Check the new EIP against the new CS limit. */
3248 if (pCtx->eip > pCtx->cs.u32Limit)
3249 {
3250         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3251 pCtx->eip, pCtx->cs.u32Limit));
3252 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3253 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3254 }
3255
3256 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3257 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3258}
3259
3260
3261/**
3262 * Implements exceptions and interrupts for protected mode.
3263 *
3264 * @returns VBox strict status code.
3265 * @param pIemCpu The IEM per CPU instance data.
3266 * @param pCtx The CPU context.
3267 * @param cbInstr The number of bytes to offset rIP by in the return
3268 * address.
3269 * @param u8Vector The interrupt / exception vector number.
3270 * @param fFlags The flags.
3271 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3272 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3273 */
3274IEM_STATIC VBOXSTRICTRC
3275iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3276 PCPUMCTX pCtx,
3277 uint8_t cbInstr,
3278 uint8_t u8Vector,
3279 uint32_t fFlags,
3280 uint16_t uErr,
3281 uint64_t uCr2)
3282{
3283 /*
3284 * Read the IDT entry.
3285 */
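    /* Each protected-mode IDT entry is 8 bytes; make sure the whole gate descriptor lies within the IDT limit. */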
3286 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3287 {
3288 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3289 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3290 }
3291 X86DESC Idte;
3292 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3293 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3294 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3295 return rcStrict;
3296 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3297 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3298 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3299
3300 /*
3301 * Check the descriptor type, DPL and such.
3302 * ASSUMES this is done in the same order as described for call-gate calls.
3303 */
3304 if (Idte.Gate.u1DescType)
3305 {
3306 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3307 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3308 }
3309 bool fTaskGate = false;
3310 uint8_t f32BitGate = true;
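    /* Note: f32BitGate also serves as a shift count (0 or 1) when sizing the stack frames below. */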
3311 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3312 switch (Idte.Gate.u4Type)
3313 {
3314 case X86_SEL_TYPE_SYS_UNDEFINED:
3315 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3316 case X86_SEL_TYPE_SYS_LDT:
3317 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3318 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3319 case X86_SEL_TYPE_SYS_UNDEFINED2:
3320 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3321 case X86_SEL_TYPE_SYS_UNDEFINED3:
3322 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3323 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3324 case X86_SEL_TYPE_SYS_UNDEFINED4:
3325 {
3326 /** @todo check what actually happens when the type is wrong...
3327 * esp. call gates. */
3328 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3329 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3330 }
3331
3332 case X86_SEL_TYPE_SYS_286_INT_GATE:
3333 f32BitGate = false;
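            /* fall thru */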
3334 case X86_SEL_TYPE_SYS_386_INT_GATE:
3335 fEflToClear |= X86_EFL_IF;
3336 break;
3337
3338 case X86_SEL_TYPE_SYS_TASK_GATE:
3339 fTaskGate = true;
3340#ifndef IEM_IMPLEMENTS_TASKSWITCH
3341 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3342#endif
3343 break;
3344
3345 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3346 f32BitGate = false;
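            /* fall thru */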
3347 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3348 break;
3349
3350 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3351 }
3352
3353 /* Check DPL against CPL if applicable. */
3354 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3355 {
3356 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3357 {
3358 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3359 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3360 }
3361 }
3362
3363 /* Is it there? */
3364 if (!Idte.Gate.u1Present)
3365 {
3366 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3367 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3368 }
3369
3370 /* Is it a task-gate? */
3371 if (fTaskGate)
3372 {
3373 /*
3374 * Construct the error code masks based on what caused this task switch.
3375 * See Intel Instruction reference for INT.
3376 */
3377 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3378 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3379 RTSEL SelTSS = Idte.Gate.u16Sel;
3380
3381 /*
3382 * Fetch the TSS descriptor in the GDT.
3383 */
3384 IEMSELDESC DescTSS;
3385 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3386 if (rcStrict != VINF_SUCCESS)
3387 {
3388 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3389 VBOXSTRICTRC_VAL(rcStrict)));
3390 return rcStrict;
3391 }
3392
3393 /* The TSS descriptor must be a system segment and be available (not busy). */
3394 if ( DescTSS.Legacy.Gen.u1DescType
3395 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3396 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3397 {
3398 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3399 u8Vector, SelTSS, DescTSS.Legacy.au64));
3400 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3401 }
3402
3403 /* The TSS must be present. */
3404 if (!DescTSS.Legacy.Gen.u1Present)
3405 {
3406 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3407 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3408 }
3409
3410 /* Do the actual task switch. */
3411 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3412 }
3413
3414 /* A null CS is bad. */
3415 RTSEL NewCS = Idte.Gate.u16Sel;
3416 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3417 {
3418 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3419 return iemRaiseGeneralProtectionFault0(pIemCpu);
3420 }
3421
3422 /* Fetch the descriptor for the new CS. */
3423 IEMSELDESC DescCS;
3424 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3425 if (rcStrict != VINF_SUCCESS)
3426 {
3427 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3428 return rcStrict;
3429 }
3430
3431 /* Must be a code segment. */
3432 if (!DescCS.Legacy.Gen.u1DescType)
3433 {
3434 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3435 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3436 }
3437 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3438 {
3439 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3440 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3441 }
3442
3443 /* Don't allow lowering the privilege level. */
3444 /** @todo Does the lowering of privileges apply to software interrupts
3445 * only? This has bearings on the more-privileged or
3446 * same-privilege stack behavior further down. A testcase would
3447 * be nice. */
3448 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3449 {
3450 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3451 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3452 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3453 }
3454
3455 /* Make sure the selector is present. */
3456 if (!DescCS.Legacy.Gen.u1Present)
3457 {
3458 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3459 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3460 }
3461
3462 /* Check the new EIP against the new CS limit. */
3463 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3464 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3465 ? Idte.Gate.u16OffsetLow
3466 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3467 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3468 if (uNewEip > cbLimitCS)
3469 {
3470 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3471 u8Vector, uNewEip, cbLimitCS, NewCS));
3472 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3473 }
3474
3475 /* Calc the flag image to push. */
3476 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3477 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3478 fEfl &= ~X86_EFL_RF;
3479 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3480 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3481
3482 /* From V8086 mode only go to CPL 0. */
3483 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3484 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3485 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3486 {
3487 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3488 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3489 }
3490
3491 /*
3492 * If the privilege level changes, we need to get a new stack from the TSS.
3493 * This in turns means validating the new SS and ESP...
3494 */
3495 if (uNewCpl != pIemCpu->uCpl)
3496 {
3497 RTSEL NewSS;
3498 uint32_t uNewEsp;
3499 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3500 if (rcStrict != VINF_SUCCESS)
3501 return rcStrict;
3502
3503 IEMSELDESC DescSS;
3504 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3505 if (rcStrict != VINF_SUCCESS)
3506 return rcStrict;
3507
3508 /* Check that there is sufficient space for the stack frame. */
3509 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
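        /* Frame: [optional error code], EIP, CS, EFLAGS, ESP and SS; V8086 mode also pushes ES, DS, FS and GS.
           Each entry is 2 or 4 bytes depending on the gate size. */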
3510 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3511 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3512 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3513
3514 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3515 {
3516 if ( uNewEsp - 1 > cbLimitSS
3517 || uNewEsp < cbStackFrame)
3518 {
3519 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3520 u8Vector, NewSS, uNewEsp, cbStackFrame));
3521 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3522 }
3523 }
3524 else
3525 {
3526 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3527 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3528 {
3529 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3530 u8Vector, NewSS, uNewEsp, cbStackFrame));
3531 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3532 }
3533 }
3534
3535 /*
3536 * Start making changes.
3537 */
3538
3539 /* Create the stack frame. */
3540 RTPTRUNION uStackFrame;
3541 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3542 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3543 if (rcStrict != VINF_SUCCESS)
3544 return rcStrict;
3545 void * const pvStackFrame = uStackFrame.pv;
3546 if (f32BitGate)
3547 {
3548 if (fFlags & IEM_XCPT_FLAGS_ERR)
3549 *uStackFrame.pu32++ = uErr;
3550 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3551 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3552 uStackFrame.pu32[2] = fEfl;
3553 uStackFrame.pu32[3] = pCtx->esp;
3554 uStackFrame.pu32[4] = pCtx->ss.Sel;
3555 if (fEfl & X86_EFL_VM)
3556 {
3557 uStackFrame.pu32[1] = pCtx->cs.Sel;
3558 uStackFrame.pu32[5] = pCtx->es.Sel;
3559 uStackFrame.pu32[6] = pCtx->ds.Sel;
3560 uStackFrame.pu32[7] = pCtx->fs.Sel;
3561 uStackFrame.pu32[8] = pCtx->gs.Sel;
3562 }
3563 }
3564 else
3565 {
3566 if (fFlags & IEM_XCPT_FLAGS_ERR)
3567 *uStackFrame.pu16++ = uErr;
3568 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3569 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3570 uStackFrame.pu16[2] = fEfl;
3571 uStackFrame.pu16[3] = pCtx->sp;
3572 uStackFrame.pu16[4] = pCtx->ss.Sel;
3573 if (fEfl & X86_EFL_VM)
3574 {
3575 uStackFrame.pu16[1] = pCtx->cs.Sel;
3576 uStackFrame.pu16[5] = pCtx->es.Sel;
3577 uStackFrame.pu16[6] = pCtx->ds.Sel;
3578 uStackFrame.pu16[7] = pCtx->fs.Sel;
3579 uStackFrame.pu16[8] = pCtx->gs.Sel;
3580 }
3581 }
3582 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3583 if (rcStrict != VINF_SUCCESS)
3584 return rcStrict;
3585
3586 /* Mark the selectors 'accessed' (hope this is the correct time). */
3587         /** @todo testcase: exactly _when_ are the accessed bits set - before or
3588 * after pushing the stack frame? (Write protect the gdt + stack to
3589 * find out.) */
3590 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3591 {
3592 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3593 if (rcStrict != VINF_SUCCESS)
3594 return rcStrict;
3595 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3596 }
3597
3598 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3599 {
3600 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3601 if (rcStrict != VINF_SUCCESS)
3602 return rcStrict;
3603 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3604 }
3605
3606 /*
3607          * Start committing the register changes (joins with the DPL=CPL branch).
3608 */
3609 pCtx->ss.Sel = NewSS;
3610 pCtx->ss.ValidSel = NewSS;
3611 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3612 pCtx->ss.u32Limit = cbLimitSS;
3613 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3614 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3615 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3616 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3617 * SP is loaded).
3618 * Need to check the other combinations too:
3619 * - 16-bit TSS, 32-bit handler
3620 * - 32-bit TSS, 16-bit handler */
3621 if (!pCtx->ss.Attr.n.u1DefBig)
3622 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
3623 else
3624 pCtx->rsp = uNewEsp - cbStackFrame;
3625 pIemCpu->uCpl = uNewCpl;
3626
3627 if (fEfl & X86_EFL_VM)
3628 {
3629 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3630 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3631 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3632 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3633 }
3634 }
3635 /*
3636 * Same privilege, no stack change and smaller stack frame.
3637 */
3638 else
3639 {
3640 uint64_t uNewRsp;
3641 RTPTRUNION uStackFrame;
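        /* Frame: [optional error code], EIP/IP, CS and EFLAGS; each entry is 2 or 4 bytes depending on the gate size. */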
3642 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3643 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3644 if (rcStrict != VINF_SUCCESS)
3645 return rcStrict;
3646 void * const pvStackFrame = uStackFrame.pv;
3647
3648 if (f32BitGate)
3649 {
3650 if (fFlags & IEM_XCPT_FLAGS_ERR)
3651 *uStackFrame.pu32++ = uErr;
3652 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3653 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3654 uStackFrame.pu32[2] = fEfl;
3655 }
3656 else
3657 {
3658 if (fFlags & IEM_XCPT_FLAGS_ERR)
3659 *uStackFrame.pu16++ = uErr;
3660 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3661 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3662 uStackFrame.pu16[2] = fEfl;
3663 }
3664 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3665 if (rcStrict != VINF_SUCCESS)
3666 return rcStrict;
3667
3668 /* Mark the CS selector as 'accessed'. */
3669 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3670 {
3671 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3672 if (rcStrict != VINF_SUCCESS)
3673 return rcStrict;
3674 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3675 }
3676
3677 /*
3678 * Start committing the register changes (joins with the other branch).
3679 */
3680 pCtx->rsp = uNewRsp;
3681 }
3682
3683 /* ... register committing continues. */
3684 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3685 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3686 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3687 pCtx->cs.u32Limit = cbLimitCS;
3688 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3689 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3690
3691 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3692 fEfl &= ~fEflToClear;
3693 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3694
3695 if (fFlags & IEM_XCPT_FLAGS_CR2)
3696 pCtx->cr2 = uCr2;
3697
3698 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3699 iemRaiseXcptAdjustState(pCtx, u8Vector);
3700
3701 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3702}
3703
3704
3705/**
3706 * Implements exceptions and interrupts for long mode.
3707 *
3708 * @returns VBox strict status code.
3709 * @param pIemCpu The IEM per CPU instance data.
3710 * @param pCtx The CPU context.
3711 * @param cbInstr The number of bytes to offset rIP by in the return
3712 * address.
3713 * @param u8Vector The interrupt / exception vector number.
3714 * @param fFlags The flags.
3715 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3716 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3717 */
3718IEM_STATIC VBOXSTRICTRC
3719iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3720 PCPUMCTX pCtx,
3721 uint8_t cbInstr,
3722 uint8_t u8Vector,
3723 uint32_t fFlags,
3724 uint16_t uErr,
3725 uint64_t uCr2)
3726{
3727 /*
3728 * Read the IDT entry.
3729 */
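    /* Each long-mode IDT entry is 16 bytes, so the gate descriptor is read in two 8-byte chunks below. */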
3730 uint16_t offIdt = (uint16_t)u8Vector << 4;
3731 if (pCtx->idtr.cbIdt < offIdt + 7)
3732 {
3733 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3734 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3735 }
3736 X86DESC64 Idte;
3737 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3738 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3739 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3740 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3741 return rcStrict;
3742 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3743 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3744 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3745
3746 /*
3747 * Check the descriptor type, DPL and such.
3748 * ASSUMES this is done in the same order as described for call-gate calls.
3749 */
3750 if (Idte.Gate.u1DescType)
3751 {
3752 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3753 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3754 }
3755 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3756 switch (Idte.Gate.u4Type)
3757 {
3758 case AMD64_SEL_TYPE_SYS_INT_GATE:
3759 fEflToClear |= X86_EFL_IF;
3760 break;
3761 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3762 break;
3763
3764 default:
3765 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3766 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3767 }
3768
3769 /* Check DPL against CPL if applicable. */
3770 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3771 {
3772 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3773 {
3774 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3775 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3776 }
3777 }
3778
3779 /* Is it there? */
3780 if (!Idte.Gate.u1Present)
3781 {
3782 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3783 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3784 }
3785
3786 /* A null CS is bad. */
3787 RTSEL NewCS = Idte.Gate.u16Sel;
3788 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3789 {
3790 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3791 return iemRaiseGeneralProtectionFault0(pIemCpu);
3792 }
3793
3794 /* Fetch the descriptor for the new CS. */
3795 IEMSELDESC DescCS;
3796 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3797 if (rcStrict != VINF_SUCCESS)
3798 {
3799 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3800 return rcStrict;
3801 }
3802
3803 /* Must be a 64-bit code segment. */
3804 if (!DescCS.Long.Gen.u1DescType)
3805 {
3806 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3807 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3808 }
3809 if ( !DescCS.Long.Gen.u1Long
3810 || DescCS.Long.Gen.u1DefBig
3811 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3812 {
3813 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3814 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3815 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3816 }
3817
3818 /* Don't allow lowering the privilege level. For non-conforming CS
3819 selectors, the CS.DPL sets the privilege level the trap/interrupt
3820 handler runs at. For conforming CS selectors, the CPL remains
3821 unchanged, but the CS.DPL must be <= CPL. */
3822 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3823 * when CPU in Ring-0. Result \#GP? */
3824 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3825 {
3826 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3827 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3828 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3829 }
3830
3831
3832 /* Make sure the selector is present. */
3833 if (!DescCS.Legacy.Gen.u1Present)
3834 {
3835 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3836 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3837 }
3838
3839 /* Check that the new RIP is canonical. */
3840 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3841 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3842 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3843 if (!IEM_IS_CANONICAL(uNewRip))
3844 {
3845 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3846 return iemRaiseGeneralProtectionFault0(pIemCpu);
3847 }
3848
3849 /*
3850 * If the privilege level changes or if the IST isn't zero, we need to get
3851 * a new stack from the TSS.
3852 */
3853 uint64_t uNewRsp;
3854 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3855 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3856 if ( uNewCpl != pIemCpu->uCpl
3857 || Idte.Gate.u3IST != 0)
3858 {
3859 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3860 if (rcStrict != VINF_SUCCESS)
3861 return rcStrict;
3862 }
3863 else
3864 uNewRsp = pCtx->rsp;
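    /* The stack pointer is aligned down to a 16-byte boundary before the frame is pushed (64-bit mode behavior). */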
3865 uNewRsp &= ~(uint64_t)0xf;
3866
3867 /*
3868 * Calc the flag image to push.
3869 */
3870 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3871 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3872 fEfl &= ~X86_EFL_RF;
3873 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3874 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3875
3876 /*
3877 * Start making changes.
3878 */
3879
3880 /* Create the stack frame. */
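    /* Five 64-bit entries: return RIP, CS, RFLAGS, RSP and SS, plus an optional error code. */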
3881 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3882 RTPTRUNION uStackFrame;
3883 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3884 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3885 if (rcStrict != VINF_SUCCESS)
3886 return rcStrict;
3887 void * const pvStackFrame = uStackFrame.pv;
3888
3889 if (fFlags & IEM_XCPT_FLAGS_ERR)
3890 *uStackFrame.pu64++ = uErr;
3891 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3892 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3893 uStackFrame.pu64[2] = fEfl;
3894 uStackFrame.pu64[3] = pCtx->rsp;
3895 uStackFrame.pu64[4] = pCtx->ss.Sel;
3896 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3897 if (rcStrict != VINF_SUCCESS)
3898 return rcStrict;
3899
3900     /* Mark the CS selector 'accessed' (hope this is the correct time). */
3901     /** @todo testcase: exactly _when_ are the accessed bits set - before or
3902 * after pushing the stack frame? (Write protect the gdt + stack to
3903 * find out.) */
3904 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3905 {
3906 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3907 if (rcStrict != VINF_SUCCESS)
3908 return rcStrict;
3909 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3910 }
3911
3912 /*
3913      * Start committing the register changes.
3914 */
3915 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3916 * hidden registers when interrupting 32-bit or 16-bit code! */
3917 if (uNewCpl != pIemCpu->uCpl)
3918 {
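        /* A CPL change in long mode loads SS with a NULL selector; only the RPL/DPL (= the new CPL) is meaningful. */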
3919 pCtx->ss.Sel = 0 | uNewCpl;
3920 pCtx->ss.ValidSel = 0 | uNewCpl;
3921 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3922 pCtx->ss.u32Limit = UINT32_MAX;
3923 pCtx->ss.u64Base = 0;
3924 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3925 }
3926 pCtx->rsp = uNewRsp - cbStackFrame;
3927 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3928 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3929 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3930 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3931 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3932 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3933 pCtx->rip = uNewRip;
3934 pIemCpu->uCpl = uNewCpl;
3935
3936 fEfl &= ~fEflToClear;
3937 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3938
3939 if (fFlags & IEM_XCPT_FLAGS_CR2)
3940 pCtx->cr2 = uCr2;
3941
3942 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3943 iemRaiseXcptAdjustState(pCtx, u8Vector);
3944
3945 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3946}
3947
3948
3949/**
3950 * Implements exceptions and interrupts.
3951 *
3952  * All exceptions and interrupts go thru this function!
3953 *
3954 * @returns VBox strict status code.
3955 * @param pIemCpu The IEM per CPU instance data.
3956 * @param cbInstr The number of bytes to offset rIP by in the return
3957 * address.
3958 * @param u8Vector The interrupt / exception vector number.
3959 * @param fFlags The flags.
3960 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3961 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3962 */
3963DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3964iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3965 uint8_t cbInstr,
3966 uint8_t u8Vector,
3967 uint32_t fFlags,
3968 uint16_t uErr,
3969 uint64_t uCr2)
3970{
3971 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3972#ifdef IN_RING0
3973 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3974 AssertRCReturn(rc, rc);
3975#endif
3976
3977 /*
3978 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3979 */
3980 if ( pCtx->eflags.Bits.u1VM
3981 && pCtx->eflags.Bits.u2IOPL != 3
3982 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3983 && (pCtx->cr0 & X86_CR0_PE) )
3984 {
3985 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3986 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3987 u8Vector = X86_XCPT_GP;
3988 uErr = 0;
3989 }
3990#ifdef DBGFTRACE_ENABLED
3991 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3992 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3993 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3994#endif
3995
3996 /*
3997 * Do recursion accounting.
3998 */
3999 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
4000 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
4001 if (pIemCpu->cXcptRecursions == 0)
4002 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4003 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
4004 else
4005 {
4006 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4007 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
4008
4009         /** @todo double and triple faults. */
4010 if (pIemCpu->cXcptRecursions >= 3)
4011 {
4012#ifdef DEBUG_bird
4013 AssertFailed();
4014#endif
4015 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4016 }
4017
4018 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4019 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4020 {
4021 ....
4022 } */
4023 }
4024 pIemCpu->cXcptRecursions++;
4025 pIemCpu->uCurXcpt = u8Vector;
4026 pIemCpu->fCurXcpt = fFlags;
4027
4028 /*
4029 * Extensive logging.
4030 */
4031#if defined(LOG_ENABLED) && defined(IN_RING3)
4032 if (LogIs3Enabled())
4033 {
4034 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4035 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4036 char szRegs[4096];
4037 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4038 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4039 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4040 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4041 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4042 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4043 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4044 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4045 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4046 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4047 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4048 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4049 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4050 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4051 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4052 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4053 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4054 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4055 " efer=%016VR{efer}\n"
4056 " pat=%016VR{pat}\n"
4057 " sf_mask=%016VR{sf_mask}\n"
4058 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4059 " lstar=%016VR{lstar}\n"
4060 " star=%016VR{star} cstar=%016VR{cstar}\n"
4061 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4062 );
4063
4064 char szInstr[256];
4065 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4066 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4067 szInstr, sizeof(szInstr), NULL);
4068 Log3(("%s%s\n", szRegs, szInstr));
4069 }
4070#endif /* LOG_ENABLED */
4071
4072 /*
4073 * Call the mode specific worker function.
4074 */
4075 VBOXSTRICTRC rcStrict;
4076 if (!(pCtx->cr0 & X86_CR0_PE))
4077 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4078 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4079 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4080 else
4081 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4082
4083 /*
4084 * Unwind.
4085 */
4086 pIemCpu->cXcptRecursions--;
4087 pIemCpu->uCurXcpt = uPrevXcpt;
4088 pIemCpu->fCurXcpt = fPrevXcpt;
4089 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4090 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4091 return rcStrict;
4092}
4093
4094
4095/** \#DE - 00. */
4096DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4097{
4098 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4099}
4100
4101
4102/** \#DB - 01.
4103  * @note This automatically clears DR7.GD. */
4104DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4105{
4106 /** @todo set/clear RF. */
4107 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4108 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4109}
4110
4111
4112/** \#UD - 06. */
4113DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4114{
4115 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4116}
4117
4118
4119/** \#NM - 07. */
4120DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4121{
4122 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4123}
4124
4125
4126/** \#TS(err) - 0a. */
4127DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4128{
4129 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4130}
4131
4132
4133/** \#TS(tr) - 0a. */
4134DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4135{
4136 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4137 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4138}
4139
4140
4141/** \#TS(0) - 0a. */
4142DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4143{
4144 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4145 0, 0);
4146}
4147
4148
4149/** \#TS(err) - 0a. */
4150DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4151{
4152 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4153 uSel & X86_SEL_MASK_OFF_RPL, 0);
4154}
4155
4156
4157/** \#NP(err) - 0b. */
4158DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4159{
4160 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4161}
4162
4163
4164/** \#NP(seg) - 0b. */
4165DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4166{
4167 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4168 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4169}
4170
4171
4172/** \#NP(sel) - 0b. */
4173DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4174{
4175 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4176 uSel & ~X86_SEL_RPL, 0);
4177}
4178
4179
4180/** \#SS(seg) - 0c. */
4181DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4182{
4183 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4184 uSel & ~X86_SEL_RPL, 0);
4185}
4186
4187
4188/** \#SS(err) - 0c. */
4189DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4190{
4191 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4192}
4193
4194
4195/** \#GP(n) - 0d. */
4196DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4197{
4198 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4199}
4200
4201
4202/** \#GP(0) - 0d. */
4203DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4204{
4205 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4206}
4207
4208
4209/** \#GP(sel) - 0d. */
4210DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4211{
4212 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4213 Sel & ~X86_SEL_RPL, 0);
4214}
4215
4216
4217/** \#GP(0) - 0d. */
4218DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4219{
4220 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4221}
4222
4223
4224/** \#GP(sel) - 0d. */
4225DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4226{
4227 NOREF(iSegReg); NOREF(fAccess);
4228 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4229 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4230}
4231
4232
4233/** \#GP(sel) - 0d. */
4234DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4235{
4236 NOREF(Sel);
4237 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4238}
4239
4240
4241/** \#GP(sel) - 0d. */
4242DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4243{
4244 NOREF(iSegReg); NOREF(fAccess);
4245 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4246}
4247
4248
4249/** \#PF(n) - 0e. */
4250DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4251{
4252 uint16_t uErr;
4253 switch (rc)
4254 {
4255 case VERR_PAGE_NOT_PRESENT:
4256 case VERR_PAGE_TABLE_NOT_PRESENT:
4257 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4258 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4259 uErr = 0;
4260 break;
4261
4262 default:
4263 AssertMsgFailed(("%Rrc\n", rc));
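            /* fall thru - unexpected status codes are reported like access denied (page present). */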
4264 case VERR_ACCESS_DENIED:
4265 uErr = X86_TRAP_PF_P;
4266 break;
4267
4268 /** @todo reserved */
4269 }
4270
4271 if (pIemCpu->uCpl == 3)
4272 uErr |= X86_TRAP_PF_US;
4273
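    /* The instruction-fetch bit (I/D) is only reported when both PAE and NX (EFER.NXE) are enabled. */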
4274 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4275 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4276 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4277 uErr |= X86_TRAP_PF_ID;
4278
4279#if 0 /* This is so much non-sense, really. Why was it done like that? */
4280 /* Note! RW access callers reporting a WRITE protection fault, will clear
4281 the READ flag before calling. So, read-modify-write accesses (RW)
4282 can safely be reported as READ faults. */
4283 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4284 uErr |= X86_TRAP_PF_RW;
4285#else
4286 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4287 {
4288 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4289 uErr |= X86_TRAP_PF_RW;
4290 }
4291#endif
4292
4293 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4294 uErr, GCPtrWhere);
4295}
4296
4297
4298/** \#MF(0) - 10. */
4299DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4300{
4301 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4302}
4303
4304
4305/** \#AC(0) - 11. */
4306DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4307{
4308 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4309}
4310
4311
4312/**
4313 * Macro for calling iemCImplRaiseDivideError().
4314 *
4315 * This enables us to add/remove arguments and force different levels of
4316 * inlining as we wish.
4317 *
4318 * @return Strict VBox status code.
4319 */
4320#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4321IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4322{
4323 NOREF(cbInstr);
4324 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4325}
4326
4327
4328/**
4329 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4330 *
4331 * This enables us to add/remove arguments and force different levels of
4332 * inlining as we wish.
4333 *
4334 * @return Strict VBox status code.
4335 */
4336#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4337IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4338{
4339 NOREF(cbInstr);
4340 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4341}
4342
4343
4344/**
4345 * Macro for calling iemCImplRaiseInvalidOpcode().
4346 *
4347 * This enables us to add/remove arguments and force different levels of
4348 * inlining as we wish.
4349 *
4350 * @return Strict VBox status code.
4351 */
4352#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4353IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4354{
4355 NOREF(cbInstr);
4356 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4357}
4358
4359
4360/** @} */
4361
4362
4363/*
4364 *
4365  * Helper routines.
4366  * Helper routines.
4367  * Helper routines.
4368 *
4369 */
4370
4371/**
4372 * Recalculates the effective operand size.
4373 *
4374 * @param pIemCpu The IEM state.
4375 */
4376IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4377{
4378 switch (pIemCpu->enmCpuMode)
4379 {
4380 case IEMMODE_16BIT:
4381 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4382 break;
4383 case IEMMODE_32BIT:
4384 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4385 break;
4386 case IEMMODE_64BIT:
4387 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4388 {
4389 case 0:
4390 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4391 break;
4392 case IEM_OP_PRF_SIZE_OP:
4393 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4394 break;
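                /* REX.W takes precedence over the 66h operand-size prefix. */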
4395 case IEM_OP_PRF_SIZE_REX_W:
4396 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4397 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4398 break;
4399 }
4400 break;
4401 default:
4402 AssertFailed();
4403 }
4404}
4405
4406
4407/**
4408 * Sets the default operand size to 64-bit and recalculates the effective
4409 * operand size.
4410 *
4411 * @param pIemCpu The IEM state.
4412 */
4413IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4414{
4415 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4416 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
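    /* Only a lone 66h prefix selects 16-bit operands; REX.W (with or without 66h) and no prefix both yield 64-bit. */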
4417 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4418 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4419 else
4420 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4421}
4422
4423
4424/*
4425 *
4426 * Common opcode decoders.
4427 * Common opcode decoders.
4428 * Common opcode decoders.
4429 *
4430 */
4431//#include <iprt/mem.h>
4432
4433/**
4434 * Used to add extra details about a stub case.
4435 * @param pIemCpu The IEM per CPU state.
4436 */
4437IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4438{
4439#if defined(LOG_ENABLED) && defined(IN_RING3)
4440 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4441 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4442 char szRegs[4096];
4443 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4444 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4445 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4446 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4447 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4448 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4449 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4450 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4451 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4452 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4453 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4454 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4455 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4456 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4457 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4458 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4459 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4460 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4461 " efer=%016VR{efer}\n"
4462 " pat=%016VR{pat}\n"
4463 " sf_mask=%016VR{sf_mask}\n"
4464 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4465 " lstar=%016VR{lstar}\n"
4466 " star=%016VR{star} cstar=%016VR{cstar}\n"
4467 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4468 );
4469
4470 char szInstr[256];
4471 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4472 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4473 szInstr, sizeof(szInstr), NULL);
4474
4475 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4476#else
4477     RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4478#endif
4479}
4480
4481/**
4482 * Complains about a stub.
4483 *
4484 * Providing two versions of this macro, one for daily use and one for use when
4485 * working on IEM.
4486 */
4487#if 0
4488# define IEMOP_BITCH_ABOUT_STUB() \
4489 do { \
4490 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4491 iemOpStubMsg2(pIemCpu); \
4492 RTAssertPanic(); \
4493 } while (0)
4494#else
4495# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4496#endif
4497
4498/** Stubs an opcode. */
4499#define FNIEMOP_STUB(a_Name) \
4500 FNIEMOP_DEF(a_Name) \
4501 { \
4502 IEMOP_BITCH_ABOUT_STUB(); \
4503 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4504 } \
4505 typedef int ignore_semicolon
4506
4507/** Stubs an opcode. */
4508#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4509 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4510 { \
4511 IEMOP_BITCH_ABOUT_STUB(); \
4512 NOREF(a_Name0); \
4513 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4514 } \
4515 typedef int ignore_semicolon
4516
4517/** Stubs an opcode which currently should raise \#UD. */
4518#define FNIEMOP_UD_STUB(a_Name) \
4519 FNIEMOP_DEF(a_Name) \
4520 { \
4521 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4522 return IEMOP_RAISE_INVALID_OPCODE(); \
4523 } \
4524 typedef int ignore_semicolon
4525
4526/** Stubs an opcode which currently should raise \#UD. */
4527#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4528 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4529 { \
4530 NOREF(a_Name0); \
4531 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4532 return IEMOP_RAISE_INVALID_OPCODE(); \
4533 } \
4534 typedef int ignore_semicolon
4535
4536
4537
4538/** @name Register Access.
4539 * @{
4540 */
4541
4542/**
4543 * Gets a reference (pointer) to the specified hidden segment register.
4544 *
4545 * @returns Hidden register reference.
4546 * @param pIemCpu The per CPU data.
4547 * @param iSegReg The segment register.
4548 */
4549IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4550{
4551 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4552 PCPUMSELREG pSReg;
4553 switch (iSegReg)
4554 {
4555 case X86_SREG_ES: pSReg = &pCtx->es; break;
4556 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4557 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4558 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4559 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4560 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4561 default:
4562 AssertFailedReturn(NULL);
4563 }
4564#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4565 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4566 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4567#else
4568 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4569#endif
4570 return pSReg;
4571}
4572
4573
4574/**
4575 * Gets a reference (pointer) to the specified segment register (the selector
4576 * value).
4577 *
4578 * @returns Pointer to the selector variable.
4579 * @param pIemCpu The per CPU data.
4580 * @param iSegReg The segment register.
4581 */
4582IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4583{
4584 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4585 switch (iSegReg)
4586 {
4587 case X86_SREG_ES: return &pCtx->es.Sel;
4588 case X86_SREG_CS: return &pCtx->cs.Sel;
4589 case X86_SREG_SS: return &pCtx->ss.Sel;
4590 case X86_SREG_DS: return &pCtx->ds.Sel;
4591 case X86_SREG_FS: return &pCtx->fs.Sel;
4592 case X86_SREG_GS: return &pCtx->gs.Sel;
4593 }
4594 AssertFailedReturn(NULL);
4595}
4596
4597
4598/**
4599 * Fetches the selector value of a segment register.
4600 *
4601 * @returns The selector value.
4602 * @param pIemCpu The per CPU data.
4603 * @param iSegReg The segment register.
4604 */
4605IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4606{
4607 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4608 switch (iSegReg)
4609 {
4610 case X86_SREG_ES: return pCtx->es.Sel;
4611 case X86_SREG_CS: return pCtx->cs.Sel;
4612 case X86_SREG_SS: return pCtx->ss.Sel;
4613 case X86_SREG_DS: return pCtx->ds.Sel;
4614 case X86_SREG_FS: return pCtx->fs.Sel;
4615 case X86_SREG_GS: return pCtx->gs.Sel;
4616 }
4617 AssertFailedReturn(0xffff);
4618}
4619
4620
4621/**
4622 * Gets a reference (pointer) to the specified general register.
4623 *
4624 * @returns Register reference.
4625 * @param pIemCpu The per CPU data.
4626 * @param iReg The general register.
4627 */
4628IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4629{
4630 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4631 switch (iReg)
4632 {
4633 case X86_GREG_xAX: return &pCtx->rax;
4634 case X86_GREG_xCX: return &pCtx->rcx;
4635 case X86_GREG_xDX: return &pCtx->rdx;
4636 case X86_GREG_xBX: return &pCtx->rbx;
4637 case X86_GREG_xSP: return &pCtx->rsp;
4638 case X86_GREG_xBP: return &pCtx->rbp;
4639 case X86_GREG_xSI: return &pCtx->rsi;
4640 case X86_GREG_xDI: return &pCtx->rdi;
4641 case X86_GREG_x8: return &pCtx->r8;
4642 case X86_GREG_x9: return &pCtx->r9;
4643 case X86_GREG_x10: return &pCtx->r10;
4644 case X86_GREG_x11: return &pCtx->r11;
4645 case X86_GREG_x12: return &pCtx->r12;
4646 case X86_GREG_x13: return &pCtx->r13;
4647 case X86_GREG_x14: return &pCtx->r14;
4648 case X86_GREG_x15: return &pCtx->r15;
4649 }
4650 AssertFailedReturn(NULL);
4651}
4652
4653
4654/**
4655 * Gets a reference (pointer) to the specified 8-bit general register.
4656 *
4657 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4658 *
4659 * @returns Register reference.
4660 * @param pIemCpu The per CPU data.
4661 * @param iReg The register.
4662 */
4663IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4664{
4665 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4666 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4667
4668 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4669 if (iReg >= 4)
4670 pu8Reg++;
4671 return pu8Reg;
4672}
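
/* Note (illustrative): without a REX prefix the byte-register encodings 4-7 select
 * AH/CH/DH/BH rather than SPL/BPL/SIL/DIL. The function above therefore folds
 * iReg 4-7 onto iReg & 3 (xAX..xBX) and advances the pointer by one byte to reach
 * the high byte of the low word; this relies on the little-endian layout of the
 * guest register fields in CPUMCTX. For example, iReg=7 (BH) resolves to
 * ((uint8_t *)&pCtx->rbx) + 1. */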
4673
4674
4675/**
4676 * Fetches the value of an 8-bit general register.
4677 *
4678 * @returns The register value.
4679 * @param pIemCpu The per CPU data.
4680 * @param iReg The register.
4681 */
4682IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4683{
4684 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4685 return *pbSrc;
4686}
4687
4688
4689/**
4690 * Fetches the value of a 16-bit general register.
4691 *
4692 * @returns The register value.
4693 * @param pIemCpu The per CPU data.
4694 * @param iReg The register.
4695 */
4696IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4697{
4698 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4699}
4700
4701
4702/**
4703 * Fetches the value of a 32-bit general register.
4704 *
4705 * @returns The register value.
4706 * @param pIemCpu The per CPU data.
4707 * @param iReg The register.
4708 */
4709IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4710{
4711 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4712}
4713
4714
4715/**
4716 * Fetches the value of a 64-bit general register.
4717 *
4718 * @returns The register value.
4719 * @param pIemCpu The per CPU data.
4720 * @param iReg The register.
4721 */
4722IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4723{
4724 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4725}
4726
4727
4728/**
4729 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4730 *
4731 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4732 * segment limit.
4733 *
4734 * @param pIemCpu The per CPU data.
4735 * @param offNextInstr The offset of the next instruction.
4736 */
4737IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4738{
4739 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4740 switch (pIemCpu->enmEffOpSize)
4741 {
4742 case IEMMODE_16BIT:
4743 {
4744 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4745 if ( uNewIp > pCtx->cs.u32Limit
4746 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4747 return iemRaiseGeneralProtectionFault0(pIemCpu);
4748 pCtx->rip = uNewIp;
4749 break;
4750 }
4751
4752 case IEMMODE_32BIT:
4753 {
4754 Assert(pCtx->rip <= UINT32_MAX);
4755 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4756
4757 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4758 if (uNewEip > pCtx->cs.u32Limit)
4759 return iemRaiseGeneralProtectionFault0(pIemCpu);
4760 pCtx->rip = uNewEip;
4761 break;
4762 }
4763
4764 case IEMMODE_64BIT:
4765 {
4766 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4767
4768 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4769 if (!IEM_IS_CANONICAL(uNewRip))
4770 return iemRaiseGeneralProtectionFault0(pIemCpu);
4771 pCtx->rip = uNewRip;
4772 break;
4773 }
4774
4775 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4776 }
4777
4778 pCtx->eflags.Bits.u1RF = 0;
4779 return VINF_SUCCESS;
4780}
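
/* Worked example (illustrative): for a 16-bit JMP short at IP=0x1000 encoded in
 * 2 bytes (so pIemCpu->offOpcode == 2) with offNextInstr = +0x10, the code above
 * computes uNewIp = 0x1000 + 0x10 + 2 = 0x1012, checks it against CS.u32Limit and,
 * if within bounds, loads it into RIP (implicitly clearing the high bits) and
 * clears EFLAGS.RF. */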
4781
4782
4783/**
4784 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4785 *
4786 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4787 * segment limit.
4788 *
4789 * @returns Strict VBox status code.
4790 * @param pIemCpu The per CPU data.
4791 * @param offNextInstr The offset of the next instruction.
4792 */
4793IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4794{
4795 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4796 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4797
4798 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4799 if ( uNewIp > pCtx->cs.u32Limit
4800 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4801 return iemRaiseGeneralProtectionFault0(pIemCpu);
4802 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4803 pCtx->rip = uNewIp;
4804 pCtx->eflags.Bits.u1RF = 0;
4805
4806 return VINF_SUCCESS;
4807}
4808
4809
4810/**
4811 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4812 *
4813 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4814 * segment limit.
4815 *
4816 * @returns Strict VBox status code.
4817 * @param pIemCpu The per CPU data.
4818 * @param offNextInstr The offset of the next instruction.
4819 */
4820IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4821{
4822 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4823 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4824
4825 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4826 {
4827 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4828
4829 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4830 if (uNewEip > pCtx->cs.u32Limit)
4831 return iemRaiseGeneralProtectionFault0(pIemCpu);
4832 pCtx->rip = uNewEip;
4833 }
4834 else
4835 {
4836 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4837
4838 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4839 if (!IEM_IS_CANONICAL(uNewRip))
4840 return iemRaiseGeneralProtectionFault0(pIemCpu);
4841 pCtx->rip = uNewRip;
4842 }
4843 pCtx->eflags.Bits.u1RF = 0;
4844 return VINF_SUCCESS;
4845}
4846
4847
4848/**
4849 * Performs a near jump to the specified address.
4850 *
4851 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4852 * segment limit.
4853 *
4854 * @param pIemCpu The per CPU data.
4855 * @param uNewRip The new RIP value.
4856 */
4857IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4858{
4859 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4860 switch (pIemCpu->enmEffOpSize)
4861 {
4862 case IEMMODE_16BIT:
4863 {
4864 Assert(uNewRip <= UINT16_MAX);
4865 if ( uNewRip > pCtx->cs.u32Limit
4866 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4867 return iemRaiseGeneralProtectionFault0(pIemCpu);
4868 /** @todo Test 16-bit jump in 64-bit mode. */
4869 pCtx->rip = uNewRip;
4870 break;
4871 }
4872
4873 case IEMMODE_32BIT:
4874 {
4875 Assert(uNewRip <= UINT32_MAX);
4876 Assert(pCtx->rip <= UINT32_MAX);
4877 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4878
4879 if (uNewRip > pCtx->cs.u32Limit)
4880 return iemRaiseGeneralProtectionFault0(pIemCpu);
4881 pCtx->rip = uNewRip;
4882 break;
4883 }
4884
4885 case IEMMODE_64BIT:
4886 {
4887 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4888
4889 if (!IEM_IS_CANONICAL(uNewRip))
4890 return iemRaiseGeneralProtectionFault0(pIemCpu);
4891 pCtx->rip = uNewRip;
4892 break;
4893 }
4894
4895 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4896 }
4897
4898 pCtx->eflags.Bits.u1RF = 0;
4899 return VINF_SUCCESS;
4900}
4901
4902
4903/**
4904 * Gets the address of the top of the stack.
4905 *
4906 * @param pIemCpu The per CPU data.
4907 * @param pCtx The CPU context which SP/ESP/RSP should be
4908 * read.
4909 */
4910DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4911{
4912 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4913 return pCtx->rsp;
4914 if (pCtx->ss.Attr.n.u1DefBig)
4915 return pCtx->esp;
4916 return pCtx->sp;
4917}
4918
4919
4920/**
4921 * Updates the RIP/EIP/IP to point to the next instruction.
4922 *
4923 * This function leaves the EFLAGS.RF flag alone.
4924 *
4925 * @param pIemCpu The per CPU data.
4926 * @param cbInstr The number of bytes to add.
4927 */
4928IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4929{
4930 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4931 switch (pIemCpu->enmCpuMode)
4932 {
4933 case IEMMODE_16BIT:
4934 Assert(pCtx->rip <= UINT16_MAX);
4935 pCtx->eip += cbInstr;
4936 pCtx->eip &= UINT32_C(0xffff);
4937 break;
4938
4939 case IEMMODE_32BIT:
4940 pCtx->eip += cbInstr;
4941 Assert(pCtx->rip <= UINT32_MAX);
4942 break;
4943
4944 case IEMMODE_64BIT:
4945 pCtx->rip += cbInstr;
4946 break;
4947 default: AssertFailed();
4948 }
4949}
4950
4951
4952#if 0
4953/**
4954 * Updates the RIP/EIP/IP to point to the next instruction.
4955 *
4956 * @param pIemCpu The per CPU data.
4957 */
4958IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4959{
4960 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4961}
4962#endif
4963
4964
4965
4966/**
4967 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4968 *
4969 * @param pIemCpu The per CPU data.
4970 * @param cbInstr The number of bytes to add.
4971 */
4972IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4973{
4974 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4975
4976 pCtx->eflags.Bits.u1RF = 0;
4977
4978 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4979 switch (pIemCpu->enmCpuMode)
4980 {
4981 /** @todo investigate if EIP or RIP is really incremented. */
4982 case IEMMODE_16BIT:
4983 case IEMMODE_32BIT:
4984 pCtx->eip += cbInstr;
4985 Assert(pCtx->rip <= UINT32_MAX);
4986 break;
4987
4988 case IEMMODE_64BIT:
4989 pCtx->rip += cbInstr;
4990 break;
4991 default: AssertFailed();
4992 }
4993}
4994
4995
4996/**
4997 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4998 *
4999 * @param pIemCpu The per CPU data.
5000 */
5001IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
5002{
5003 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
5004}
5005
5006
5007/**
5008 * Adds to the stack pointer.
5009 *
5010 * @param pIemCpu The per CPU data.
5011 * @param pCtx The CPU context which SP/ESP/RSP should be
5012 * updated.
5013 * @param cbToAdd The number of bytes to add.
5014 */
5015DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
5016{
5017 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5018 pCtx->rsp += cbToAdd;
5019 else if (pCtx->ss.Attr.n.u1DefBig)
5020 pCtx->esp += cbToAdd;
5021 else
5022 pCtx->sp += cbToAdd;
5023}
5024
5025
5026/**
5027 * Subtracts from the stack pointer.
5028 *
5029 * @param pIemCpu The per CPU data.
5030 * @param pCtx The CPU context which SP/ESP/RSP should be
5031 * updated.
5032 * @param cbToSub The number of bytes to subtract.
5033 */
5034DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
5035{
5036 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5037 pCtx->rsp -= cbToSub;
5038 else if (pCtx->ss.Attr.n.u1DefBig)
5039 pCtx->esp -= cbToSub;
5040 else
5041 pCtx->sp -= cbToSub;
5042}
5043
5044
5045/**
5046 * Adds to the temporary stack pointer.
5047 *
5048 * @param pIemCpu The per CPU data.
5049 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5050 * @param cbToAdd The number of bytes to add.
5051 * @param pCtx Where to get the current stack mode.
5052 */
5053DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
5054{
5055 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5056 pTmpRsp->u += cbToAdd;
5057 else if (pCtx->ss.Attr.n.u1DefBig)
5058 pTmpRsp->DWords.dw0 += cbToAdd;
5059 else
5060 pTmpRsp->Words.w0 += cbToAdd;
5061}
5062
5063
5064/**
5065 * Subtracts from the temporary stack pointer.
5066 *
5067 * @param pIemCpu The per CPU data.
5068 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5069 * @param cbToSub The number of bytes to subtract.
5070 * @param pCtx Where to get the current stack mode.
5071 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5072 * expecting that.
5073 */
5074DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5075{
5076 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5077 pTmpRsp->u -= cbToSub;
5078 else if (pCtx->ss.Attr.n.u1DefBig)
5079 pTmpRsp->DWords.dw0 -= cbToSub;
5080 else
5081 pTmpRsp->Words.w0 -= cbToSub;
5082}
5083
5084
5085/**
5086 * Calculates the effective stack address for a push of the specified size as
5087 * well as the new RSP value (upper bits may be masked).
5088 *
5089 * @returns Effective stack address for the push.
5090 * @param pIemCpu The IEM per CPU data.
5091 * @param pCtx Where to get the current stack mode.
5092 * @param cbItem The size of the stack item to push.
5093 * @param puNewRsp Where to return the new RSP value.
5094 */
5095DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5096{
5097 RTUINT64U uTmpRsp;
5098 RTGCPTR GCPtrTop;
5099 uTmpRsp.u = pCtx->rsp;
5100
5101 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5102 GCPtrTop = uTmpRsp.u -= cbItem;
5103 else if (pCtx->ss.Attr.n.u1DefBig)
5104 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5105 else
5106 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5107 *puNewRsp = uTmpRsp.u;
5108 return GCPtrTop;
5109}
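
/* Worked example (illustrative): with a 32-bit stack (SS.Attr.n.u1DefBig set) and
 * RSP = 0x00001000, a 4-byte push gives GCPtrTop = 0x00000FFC and
 * *puNewRsp = 0x00000FFC; only DWords.dw0 is decremented, so any (zero) upper
 * bits of RSP are left untouched, mirroring how ESP-relative pushes behave. */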
5110
5111
5112/**
5113 * Gets the current stack pointer and calculates the value after a pop of the
5114 * specified size.
5115 *
5116 * @returns Current stack pointer.
5117 * @param pIemCpu The per CPU data.
5118 * @param pCtx Where to get the current stack mode.
5119 * @param cbItem The size of the stack item to pop.
5120 * @param puNewRsp Where to return the new RSP value.
5121 */
5122DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5123{
5124 RTUINT64U uTmpRsp;
5125 RTGCPTR GCPtrTop;
5126 uTmpRsp.u = pCtx->rsp;
5127
5128 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5129 {
5130 GCPtrTop = uTmpRsp.u;
5131 uTmpRsp.u += cbItem;
5132 }
5133 else if (pCtx->ss.Attr.n.u1DefBig)
5134 {
5135 GCPtrTop = uTmpRsp.DWords.dw0;
5136 uTmpRsp.DWords.dw0 += cbItem;
5137 }
5138 else
5139 {
5140 GCPtrTop = uTmpRsp.Words.w0;
5141 uTmpRsp.Words.w0 += cbItem;
5142 }
5143 *puNewRsp = uTmpRsp.u;
5144 return GCPtrTop;
5145}
5146
5147
5148/**
5149 * Calculates the effective stack address for a push of the specified size as
5150 * well as the new temporary RSP value (upper bits may be masked).
5151 *
5152 * @returns Effective stack address for the push.
5153 * @param pIemCpu The per CPU data.
5154 * @param pCtx Where to get the current stack mode.
5155 * @param pTmpRsp The temporary stack pointer. This is updated.
5156 * @param cbItem The size of the stack item to push.
5157 */
5158DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5159{
5160 RTGCPTR GCPtrTop;
5161
5162 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5163 GCPtrTop = pTmpRsp->u -= cbItem;
5164 else if (pCtx->ss.Attr.n.u1DefBig)
5165 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5166 else
5167 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5168 return GCPtrTop;
5169}
5170
5171
5172/**
5173 * Gets the effective stack address for a pop of the specified size and
5174 * calculates and updates the temporary RSP.
5175 *
5176 * @returns Current stack pointer.
5177 * @param pIemCpu The per CPU data.
5178 * @param pCtx Where to get the current stack mode.
5179 * @param pTmpRsp The temporary stack pointer. This is updated.
5180 * @param cbItem The size of the stack item to pop.
5181 */
5182DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5183{
5184 RTGCPTR GCPtrTop;
5185 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5186 {
5187 GCPtrTop = pTmpRsp->u;
5188 pTmpRsp->u += cbItem;
5189 }
5190 else if (pCtx->ss.Attr.n.u1DefBig)
5191 {
5192 GCPtrTop = pTmpRsp->DWords.dw0;
5193 pTmpRsp->DWords.dw0 += cbItem;
5194 }
5195 else
5196 {
5197 GCPtrTop = pTmpRsp->Words.w0;
5198 pTmpRsp->Words.w0 += cbItem;
5199 }
5200 return GCPtrTop;
5201}
5202
5203/** @} */
5204
5205
5206/** @name FPU access and helpers.
5207 *
5208 * @{
5209 */
5210
5211
5212/**
5213 * Hook for preparing to use the host FPU.
5214 *
5215 * This is necessary in ring-0 and raw-mode context.
5216 *
5217 * @param pIemCpu The IEM per CPU data.
5218 */
5219DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5220{
5221#ifdef IN_RING3
5222 NOREF(pIemCpu);
5223#else
5224/** @todo RZ: FIXME */
5225//# error "Implement me"
5226#endif
5227}
5228
5229
5230/**
5231 * Hook for preparing to use the host FPU for SSE.
5232 *
5233 * This is necessary in ring-0 and raw-mode context.
5234 *
5235 * @param pIemCpu The IEM per CPU data.
5236 */
5237DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5238{
5239 iemFpuPrepareUsage(pIemCpu);
5240}
5241
5242
5243/**
5244 * Stores a QNaN value into an FPU register.
5245 *
5246 * @param pReg Pointer to the register.
5247 */
5248DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5249{
5250 pReg->au32[0] = UINT32_C(0x00000000);
5251 pReg->au32[1] = UINT32_C(0xc0000000);
5252 pReg->au16[4] = UINT16_C(0xffff);
5253}
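
/* Note (illustrative): the bit pattern written above is the x87 "QNaN floating-point
 * indefinite" value: mantissa 0xC000000000000000 (integer bit and top fraction bit
 * set) with sign/exponent word 0xFFFF (sign=1, exponent=0x7FFF), i.e. the value the
 * FPU itself produces for masked invalid-operation responses. */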
5254
5255
5256/**
5257 * Updates the FOP, FPU.CS and FPUIP registers.
5258 *
5259 * @param pIemCpu The IEM per CPU data.
5260 * @param pCtx The CPU context.
5261 * @param pFpuCtx The FPU context.
5262 */
5263DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5264{
5265 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5266 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5267 /** @todo x87.CS and FPUIP need to be kept separately. */
5268 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5269 {
5270 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5271 * happens in real mode here based on the fnsave and fnstenv images. */
5272 pFpuCtx->CS = 0;
5273 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5274 }
5275 else
5276 {
5277 pFpuCtx->CS = pCtx->cs.Sel;
5278 pFpuCtx->FPUIP = pCtx->rip;
5279 }
5280}
5281
5282
5283/**
5284 * Updates the x87.DS and FPUDP registers.
5285 *
5286 * @param pIemCpu The IEM per CPU data.
5287 * @param pCtx The CPU context.
5288 * @param pFpuCtx The FPU context.
5289 * @param iEffSeg The effective segment register.
5290 * @param GCPtrEff The effective address relative to @a iEffSeg.
5291 */
5292DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5293{
5294 RTSEL sel;
5295 switch (iEffSeg)
5296 {
5297 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5298 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5299 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5300 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5301 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5302 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5303 default:
5304 AssertMsgFailed(("%d\n", iEffSeg));
5305 sel = pCtx->ds.Sel;
5306 }
5307 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5308 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5309 {
5310 pFpuCtx->DS = 0;
5311 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5312 }
5313 else
5314 {
5315 pFpuCtx->DS = sel;
5316 pFpuCtx->FPUDP = GCPtrEff;
5317 }
5318}
5319
5320
5321/**
5322 * Rotates the stack registers in the push direction.
5323 *
5324 * @param pFpuCtx The FPU context.
5325 * @remarks This is a complete waste of time, but fxsave stores the registers in
5326 * stack order.
5327 */
5328DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5329{
5330 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5331 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5332 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5333 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5334 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5335 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5336 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5337 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5338 pFpuCtx->aRegs[0].r80 = r80Tmp;
5339}
5340
5341
5342/**
5343 * Rotates the stack registers in the pop direction.
5344 *
5345 * @param pFpuCtx The FPU context.
5346 * @remarks This is a complete waste of time, but fxsave stores the registers in
5347 * stack order.
5348 */
5349DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5350{
5351 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5352 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5353 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5354 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5355 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5356 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5357 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5358 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5359 pFpuCtx->aRegs[7].r80 = r80Tmp;
5360}
5361
5362
5363/**
5364 * Updates FSW and pushes an FPU result onto the FPU stack if no pending
5365 * exception prevents it.
5366 *
5367 * @param pIemCpu The IEM per CPU data.
5368 * @param pResult The FPU operation result to push.
5369 * @param pFpuCtx The FPU context.
5370 */
5371IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5372{
5373 /* Update FSW and bail if there are pending exceptions afterwards. */
5374 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5375 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5376 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5377 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5378 {
5379 pFpuCtx->FSW = fFsw;
5380 return;
5381 }
5382
5383 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5384 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5385 {
5386 /* All is fine, push the actual value. */
5387 pFpuCtx->FTW |= RT_BIT(iNewTop);
5388 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5389 }
5390 else if (pFpuCtx->FCW & X86_FCW_IM)
5391 {
5392 /* Masked stack overflow, push QNaN. */
5393 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5394 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5395 }
5396 else
5397 {
5398 /* Raise stack overflow, don't push anything. */
5399 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5400 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5401 return;
5402 }
5403
5404 fFsw &= ~X86_FSW_TOP_MASK;
5405 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5406 pFpuCtx->FSW = fFsw;
5407
5408 iemFpuRotateStackPush(pFpuCtx);
5409}
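
/* Note (illustrative): "(X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK" is TOP - 1
 * modulo 8 (adding 7 equals subtracting 1 in 3-bit arithmetic), i.e. the register
 * that becomes ST(0) after the push. The result is stored in aRegs[7] because the
 * array is kept in ST-relative order; the subsequent iemFpuRotateStackPush makes
 * that slot the new aRegs[0]/ST(0). */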
5410
5411
5412/**
5413 * Stores a result in an FPU register and updates the FSW and FTW.
5414 *
5415 * @param pFpuCtx The FPU context.
5416 * @param pResult The result to store.
5417 * @param iStReg Which FPU register to store it in.
5418 */
5419IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5420{
5421 Assert(iStReg < 8);
5422 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5423 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5424 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5425 pFpuCtx->FTW |= RT_BIT(iReg);
5426 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5427}
5428
5429
5430/**
5431 * Only updates the FPU status word (FSW) with the result of the current
5432 * instruction.
5433 *
5434 * @param pFpuCtx The FPU context.
5435 * @param u16FSW The FSW output of the current instruction.
5436 */
5437IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5438{
5439 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5440 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5441}
5442
5443
5444/**
5445 * Pops one item off the FPU stack if no pending exception prevents it.
5446 *
5447 * @param pFpuCtx The FPU context.
5448 */
5449IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5450{
5451 /* Check pending exceptions. */
5452 uint16_t uFSW = pFpuCtx->FSW;
5453 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5454 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5455 return;
5456
5457 /* TOP--. */
5458 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5459 uFSW &= ~X86_FSW_TOP_MASK;
5460 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5461 pFpuCtx->FSW = uFSW;
5462
5463 /* Mark the previous ST0 as empty. */
5464 iOldTop >>= X86_FSW_TOP_SHIFT;
5465 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5466
5467 /* Rotate the registers. */
5468 iemFpuRotateStackPop(pFpuCtx);
5469}
5470
5471
5472/**
5473 * Pushes an FPU result onto the FPU stack if no pending exception prevents it.
5474 *
5475 * @param pIemCpu The IEM per CPU data.
5476 * @param pResult The FPU operation result to push.
5477 */
5478IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5479{
5480 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5481 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5482 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5483 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5484}
5485
5486
5487/**
5488 * Pushes an FPU result onto the FPU stack if no pending exception prevents it,
5489 * and sets FPUDP and FPUDS.
5490 *
5491 * @param pIemCpu The IEM per CPU data.
5492 * @param pResult The FPU operation result to push.
5493 * @param iEffSeg The effective segment register.
5494 * @param GCPtrEff The effective address relative to @a iEffSeg.
5495 */
5496IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5497{
5498 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5499 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5500 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5501 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5502 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5503}
5504
5505
5506/**
5507 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5508 * unless a pending exception prevents it.
5509 *
5510 * @param pIemCpu The IEM per CPU data.
5511 * @param pResult The FPU operation result to store and push.
5512 */
5513IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5514{
5515 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5516 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5517 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5518
5519 /* Update FSW and bail if there are pending exceptions afterwards. */
5520 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5521 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5522 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5523 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5524 {
5525 pFpuCtx->FSW = fFsw;
5526 return;
5527 }
5528
5529 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5530 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5531 {
5532 /* All is fine, push the actual value. */
5533 pFpuCtx->FTW |= RT_BIT(iNewTop);
5534 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5535 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5536 }
5537 else if (pFpuCtx->FCW & X86_FCW_IM)
5538 {
5539 /* Masked stack overflow, push QNaN. */
5540 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5541 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5542 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5543 }
5544 else
5545 {
5546 /* Raise stack overflow, don't push anything. */
5547 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5548 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5549 return;
5550 }
5551
5552 fFsw &= ~X86_FSW_TOP_MASK;
5553 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5554 pFpuCtx->FSW = fFsw;
5555
5556 iemFpuRotateStackPush(pFpuCtx);
5557}
5558
5559
5560/**
5561 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5562 * FOP.
5563 *
5564 * @param pIemCpu The IEM per CPU data.
5565 * @param pResult The result to store.
5566 * @param iStReg Which FPU register to store it in.
5567 */
5568IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5569{
5570 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5571 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5572 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5573 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5574}
5575
5576
5577/**
5578 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5579 * FOP, and then pops the stack.
5580 *
5581 * @param pIemCpu The IEM per CPU data.
5582 * @param pResult The result to store.
5583 * @param iStReg Which FPU register to store it in.
5584 */
5585IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5586{
5587 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5588 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5589 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5590 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5591 iemFpuMaybePopOne(pFpuCtx);
5592}
5593
5594
5595/**
5596 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5597 * FPUDP, and FPUDS.
5598 *
5599 * @param pIemCpu The IEM per CPU data.
5600 * @param pResult The result to store.
5601 * @param iStReg Which FPU register to store it in.
5602 * @param iEffSeg The effective memory operand selector register.
5603 * @param GCPtrEff The effective memory operand offset.
5604 */
5605IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5606 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5607{
5608 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5609 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5610 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5611 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5612 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5613}
5614
5615
5616/**
5617 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5618 * FPUDP, and FPUDS, and then pops the stack.
5619 *
5620 * @param pIemCpu The IEM per CPU data.
5621 * @param pResult The result to store.
5622 * @param iStReg Which FPU register to store it in.
5623 * @param iEffSeg The effective memory operand selector register.
5624 * @param GCPtrEff The effective memory operand offset.
5625 */
5626IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5627 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5628{
5629 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5630 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5631 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5632 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5633 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5634 iemFpuMaybePopOne(pFpuCtx);
5635}
5636
5637
5638/**
5639 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5640 *
5641 * @param pIemCpu The IEM per CPU data.
5642 */
5643IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5644{
5645 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5646 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5647 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5648}
5649
5650
5651/**
5652 * Marks the specified stack register as free (for FFREE).
5653 *
5654 * @param pIemCpu The IEM per CPU data.
5655 * @param iStReg The register to free.
5656 */
5657IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5658{
5659 Assert(iStReg < 8);
5660 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5661 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5662 pFpuCtx->FTW &= ~RT_BIT(iReg);
5663}
5664
5665
5666/**
5667 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5668 *
5669 * @param pIemCpu The IEM per CPU data.
5670 */
5671IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5672{
5673 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5674 uint16_t uFsw = pFpuCtx->FSW;
5675 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5676 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5677 uFsw &= ~X86_FSW_TOP_MASK;
5678 uFsw |= uTop;
5679 pFpuCtx->FSW = uFsw;
5680}
5681
5682
5683/**
5684 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5685 *
5686 * @param pIemCpu The IEM per CPU data.
5687 */
5688IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5689{
5690 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5691 uint16_t uFsw = pFpuCtx->FSW;
5692 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5693 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5694 uFsw &= ~X86_FSW_TOP_MASK;
5695 uFsw |= uTop;
5696 pFpuCtx->FSW = uFsw;
5697}
5698
5699
5700/**
5701 * Updates the FSW, FOP, FPUIP, and FPUCS.
5702 *
5703 * @param pIemCpu The IEM per CPU data.
5704 * @param u16FSW The FSW from the current instruction.
5705 */
5706IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5707{
5708 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5709 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5710 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5711 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5712}
5713
5714
5715/**
5716 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5717 *
5718 * @param pIemCpu The IEM per CPU data.
5719 * @param u16FSW The FSW from the current instruction.
5720 */
5721IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5722{
5723 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5724 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5725 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5726 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5727 iemFpuMaybePopOne(pFpuCtx);
5728}
5729
5730
5731/**
5732 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5733 *
5734 * @param pIemCpu The IEM per CPU data.
5735 * @param u16FSW The FSW from the current instruction.
5736 * @param iEffSeg The effective memory operand selector register.
5737 * @param GCPtrEff The effective memory operand offset.
5738 */
5739IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5740{
5741 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5742 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5743 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5744 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5745 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5746}
5747
5748
5749/**
5750 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5751 *
5752 * @param pIemCpu The IEM per CPU data.
5753 * @param u16FSW The FSW from the current instruction.
5754 */
5755IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5756{
5757 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5758 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5759 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5760 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5761 iemFpuMaybePopOne(pFpuCtx);
5762 iemFpuMaybePopOne(pFpuCtx);
5763}
5764
5765
5766/**
5767 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5768 *
5769 * @param pIemCpu The IEM per CPU data.
5770 * @param u16FSW The FSW from the current instruction.
5771 * @param iEffSeg The effective memory operand selector register.
5772 * @param GCPtrEff The effective memory operand offset.
5773 */
5774IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5775{
5776 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5777 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5778 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5779 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5780 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5781 iemFpuMaybePopOne(pFpuCtx);
5782}
5783
5784
5785/**
5786 * Worker routine for raising an FPU stack underflow exception.
5787 *
5788 * @param pIemCpu The IEM per CPU data.
5789 * @param pFpuCtx The FPU context.
5790 * @param iStReg The stack register being accessed.
5791 */
5792IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5793{
5794 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5795 if (pFpuCtx->FCW & X86_FCW_IM)
5796 {
5797 /* Masked underflow. */
5798 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5799 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5800 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5801 if (iStReg != UINT8_MAX)
5802 {
5803 pFpuCtx->FTW |= RT_BIT(iReg);
5804 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5805 }
5806 }
5807 else
5808 {
5809 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5810 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5811 }
5812}
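
/* Note (illustrative): for the masked case above the C1 bit stays clear (it was
 * wiped together with the other condition-code bits), which is how the FPU
 * distinguishes a stack underflow (C1=0) from a stack overflow (C1=1) when
 * FSW.SF is set. */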
5813
5814
5815/**
5816 * Raises an FPU stack underflow exception.
5817 *
5818 * @param pIemCpu The IEM per CPU data.
5819 * @param iStReg The destination register that should be loaded
5820 * with QNaN if \#IS is not masked. Specify
5821 * UINT8_MAX if none (like for fcom).
5822 */
5823DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5824{
5825 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5826 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5827 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5828 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5829}
5830
5831
5832DECL_NO_INLINE(IEM_STATIC, void)
5833iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5834{
5835 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5836 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5837 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5838 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5839 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5840}
5841
5842
5843DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5844{
5845 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5846 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5847 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5848 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5849 iemFpuMaybePopOne(pFpuCtx);
5850}
5851
5852
5853DECL_NO_INLINE(IEM_STATIC, void)
5854iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5855{
5856 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5857 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5858 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5859 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5860 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5861 iemFpuMaybePopOne(pFpuCtx);
5862}
5863
5864
5865DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5866{
5867 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5868 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5869 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5870 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5871 iemFpuMaybePopOne(pFpuCtx);
5872 iemFpuMaybePopOne(pFpuCtx);
5873}
5874
5875
5876DECL_NO_INLINE(IEM_STATIC, void)
5877iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5878{
5879 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5880 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5881 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5882
5883 if (pFpuCtx->FCW & X86_FCW_IM)
5884 {
5885 /* Masked stack underflow - Push QNaN. */
5886 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5887 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5888 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5889 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5890 pFpuCtx->FTW |= RT_BIT(iNewTop);
5891 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5892 iemFpuRotateStackPush(pFpuCtx);
5893 }
5894 else
5895 {
5896 /* Exception pending - don't change TOP or the register stack. */
5897 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5898 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5899 }
5900}
5901
5902
5903DECL_NO_INLINE(IEM_STATIC, void)
5904iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5905{
5906 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5907 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5908 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5909
5910 if (pFpuCtx->FCW & X86_FCW_IM)
5911 {
5912 /* Masked stack underflow - Push QNaN. */
5913 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5914 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5915 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5916 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5917 pFpuCtx->FTW |= RT_BIT(iNewTop);
5918 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5919 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5920 iemFpuRotateStackPush(pFpuCtx);
5921 }
5922 else
5923 {
5924 /* Exception pending - don't change TOP or the register stack. */
5925 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5926 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5927 }
5928}
5929
5930
5931/**
5932 * Worker routine for raising an FPU stack overflow exception on a push.
5933 *
5934 * @param pFpuCtx The FPU context.
5935 */
5936IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5937{
5938 if (pFpuCtx->FCW & X86_FCW_IM)
5939 {
5940 /* Masked overflow. */
5941 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5942 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5943 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5944 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5945 pFpuCtx->FTW |= RT_BIT(iNewTop);
5946 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5947 iemFpuRotateStackPush(pFpuCtx);
5948 }
5949 else
5950 {
5951 /* Exception pending - don't change TOP or the register stack. */
5952 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5953 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5954 }
5955}
5956
5957
5958/**
5959 * Raises an FPU stack overflow exception on a push.
5960 *
5961 * @param pIemCpu The IEM per CPU data.
5962 */
5963DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5964{
5965 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5966 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5967 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5968 iemFpuStackPushOverflowOnly(pFpuCtx);
5969}
5970
5971
5972/**
5973 * Raises an FPU stack overflow exception on a push with a memory operand.
5974 *
5975 * @param pIemCpu The IEM per CPU data.
5976 * @param iEffSeg The effective memory operand selector register.
5977 * @param GCPtrEff The effective memory operand offset.
5978 */
5979DECL_NO_INLINE(IEM_STATIC, void)
5980iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5981{
5982 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5983 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5984 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5985 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5986 iemFpuStackPushOverflowOnly(pFpuCtx);
5987}
5988
5989
5990IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5991{
5992 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5993 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5994 if (pFpuCtx->FTW & RT_BIT(iReg))
5995 return VINF_SUCCESS;
5996 return VERR_NOT_FOUND;
5997}
5998
5999
6000IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
6001{
6002 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6003 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6004 if (pFpuCtx->FTW & RT_BIT(iReg))
6005 {
6006 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
6007 return VINF_SUCCESS;
6008 }
6009 return VERR_NOT_FOUND;
6010}
6011
6012
6013IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
6014 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
6015{
6016 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6017 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6018 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6019 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6020 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6021 {
6022 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6023 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
6024 return VINF_SUCCESS;
6025 }
6026 return VERR_NOT_FOUND;
6027}
6028
6029
6030IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
6031{
6032 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6033 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6034 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6035 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6036 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6037 {
6038 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6039 return VINF_SUCCESS;
6040 }
6041 return VERR_NOT_FOUND;
6042}
6043
6044
6045/**
6046 * Updates the FPU exception status after FCW is changed.
6047 *
6048 * @param pFpuCtx The FPU context.
6049 */
6050IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
6051{
6052 uint16_t u16Fsw = pFpuCtx->FSW;
6053 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
6054 u16Fsw |= X86_FSW_ES | X86_FSW_B;
6055 else
6056 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
6057 pFpuCtx->FSW = u16Fsw;
6058}
6059
6060
6061/**
6062 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6063 *
6064 * @returns The full FTW.
6065 * @param pFpuCtx The FPU context.
6066 */
6067IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6068{
6069 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6070 uint16_t u16Ftw = 0;
6071 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6072 for (unsigned iSt = 0; iSt < 8; iSt++)
6073 {
6074 unsigned const iReg = (iSt + iTop) & 7;
6075 if (!(u8Ftw & RT_BIT(iReg)))
6076 u16Ftw |= 3 << (iReg * 2); /* empty */
6077 else
6078 {
6079 uint16_t uTag;
6080 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6081 if (pr80Reg->s.uExponent == 0x7fff)
6082 uTag = 2; /* Exponent is all 1's => Special. */
6083 else if (pr80Reg->s.uExponent == 0x0000)
6084 {
6085 if (pr80Reg->s.u64Mantissa == 0x0000)
6086 uTag = 1; /* All bits are zero => Zero. */
6087 else
6088 uTag = 2; /* Must be special. */
6089 }
6090 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6091 uTag = 0; /* Valid. */
6092 else
6093 uTag = 2; /* Must be special. */
6094
6095 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
6096 }
6097 }
6098
6099 return u16Ftw;
6100}
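
/* Worked example (illustrative): the full tag word uses two bits per physical
 * register: 00=valid, 01=zero, 10=special (NaN/infinity/denormal), 11=empty.
 * With TOP=6 and only ST(0) (physical register 6) holding 1.0, the loop above
 * yields 0xCFFF - every field is 11b (empty) except bits 13:12, which are 00b
 * (valid). */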
6101
6102
6103/**
6104 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6105 *
6106 * @returns The compressed FTW.
6107 * @param u16FullFtw The full FTW to convert.
6108 */
6109IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6110{
6111 uint8_t u8Ftw = 0;
6112 for (unsigned i = 0; i < 8; i++)
6113 {
6114 if ((u16FullFtw & 3) != 3 /*empty*/)
6115 u8Ftw |= RT_BIT(i);
6116 u16FullFtw >>= 2;
6117 }
6118
6119 return u8Ftw;
6120}
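
/* Worked example (illustrative): compressing the full tag word 0xCFFF from the
 * example above yields 0x40 - only physical register 6 has a tag other than 11b
 * (empty), so only bit 6 is set in the abridged FXSAVE-style tag byte. */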
6121
6122/** @} */
6123
6124
6125/** @name Memory access.
6126 *
6127 * @{
6128 */
6129
6130
6131/**
6132 * Updates the IEMCPU::cbWritten counter if applicable.
6133 *
6134 * @param pIemCpu The IEM per CPU data.
6135 * @param fAccess The access being accounted for.
6136 * @param cbMem The access size.
6137 */
6138DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6139{
6140 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6141 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6142 pIemCpu->cbWritten += (uint32_t)cbMem;
6143}
6144
6145
6146/**
6147 * Checks if the given segment can be written to, raising the appropriate
6148 * exception if not.
6149 *
6150 * @returns VBox strict status code.
6151 *
6152 * @param pIemCpu The IEM per CPU data.
6153 * @param pHid Pointer to the hidden register.
6154 * @param iSegReg The register number.
6155 * @param pu64BaseAddr Where to return the base address to use for the
6156 * segment. (In 64-bit code it may differ from the
6157 * base in the hidden segment.)
6158 */
6159IEM_STATIC VBOXSTRICTRC
6160iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6161{
6162 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6163 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6164 else
6165 {
6166 if (!pHid->Attr.n.u1Present)
6167 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6168
6169 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6170 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6171 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6172 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6173 *pu64BaseAddr = pHid->u64Base;
6174 }
6175 return VINF_SUCCESS;
6176}
6177
6178
6179/**
6180 * Checks if the given segment can be read from, raising the appropriate
6181 * exception if not.
6182 *
6183 * @returns VBox strict status code.
6184 *
6185 * @param pIemCpu The IEM per CPU data.
6186 * @param pHid Pointer to the hidden register.
6187 * @param iSegReg The register number.
6188 * @param pu64BaseAddr Where to return the base address to use for the
6189 * segment. (In 64-bit code it may differ from the
6190 * base in the hidden segment.)
6191 */
6192IEM_STATIC VBOXSTRICTRC
6193iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6194{
6195 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6196 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6197 else
6198 {
6199 if (!pHid->Attr.n.u1Present)
6200 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6201
6202 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6203 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6204 *pu64BaseAddr = pHid->u64Base;
6205 }
6206 return VINF_SUCCESS;
6207}
6208
6209
6210/**
6211 * Applies the segment limit, base and attributes.
6212 *
6213 * This may raise a \#GP or \#SS.
6214 *
6215 * @returns VBox strict status code.
6216 *
6217 * @param pIemCpu The IEM per CPU data.
6218 * @param fAccess The kind of access which is being performed.
6219 * @param iSegReg The index of the segment register to apply.
6220 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6221 * TSS, ++).
6222 * @param cbMem The access size.
6223 * @param pGCPtrMem Pointer to the guest memory address to apply
6224 * segmentation to. Input and output parameter.
6225 */
6226IEM_STATIC VBOXSTRICTRC
6227iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6228{
6229 if (iSegReg == UINT8_MAX)
6230 return VINF_SUCCESS;
6231
6232 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6233 switch (pIemCpu->enmCpuMode)
6234 {
6235 case IEMMODE_16BIT:
6236 case IEMMODE_32BIT:
6237 {
6238 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6239 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6240
6241 if ( pSel->Attr.n.u1Present
6242 && !pSel->Attr.n.u1Unusable)
6243 {
6244 Assert(pSel->Attr.n.u1DescType);
6245 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6246 {
6247 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6248 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6249 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6250
6251 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6252 {
6253 /** @todo CPL check. */
6254 }
6255
6256 /*
6257 * There are two kinds of data selectors, normal and expand down.
6258 */
6259 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6260 {
6261 if ( GCPtrFirst32 > pSel->u32Limit
6262 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6263 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6264 }
6265 else
6266 {
6267 /*
6268 * The upper boundary is defined by the B bit, not the G bit!
6269 */
6270 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6271 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6272 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6273 }
6274 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6275 }
6276 else
6277 {
6278
6279 /*
6280 * A code selector can usually be read through; writing is
6281 * only permitted in real and V8086 mode.
6282 */
6283 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6284 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6285 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6286 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6287 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6288
6289 if ( GCPtrFirst32 > pSel->u32Limit
6290 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6291 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6292
6293 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6294 {
6295 /** @todo CPL check. */
6296 }
6297
6298 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6299 }
6300 }
6301 else
6302 return iemRaiseGeneralProtectionFault0(pIemCpu);
6303 return VINF_SUCCESS;
6304 }
6305
6306 case IEMMODE_64BIT:
6307 {
6308 RTGCPTR GCPtrMem = *pGCPtrMem;
6309 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6310 *pGCPtrMem = GCPtrMem + pSel->u64Base;
6311
6312 Assert(cbMem >= 1);
6313 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
6314 return VINF_SUCCESS;
6315 return iemRaiseGeneralProtectionFault0(pIemCpu);
6316 }
6317
6318 default:
6319 AssertFailedReturn(VERR_IEM_IPE_7);
6320 }
6321}
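
/* Worked example (illustrative): for an expand-down data segment (type has
 * X86_SEL_TYPE_DOWN) with u32Limit=0x0FFF and the big/default bit set, the code
 * above only accepts offsets in the range 0x1000..0xFFFFFFFF; a 4-byte access
 * starting at GCPtrFirst32=0x0FFE is rejected via iemRaiseSelectorBounds because
 * the start is not above the limit. */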
6322
6323
6324/**
6325 * Translates a virtual address to a physical address and checks if we
6326 * can access the page as specified.
6327 *
6328 * @param pIemCpu The IEM per CPU data.
6329 * @param GCPtrMem The virtual address.
6330 * @param fAccess The intended access.
6331 * @param pGCPhysMem Where to return the physical address.
6332 */
6333IEM_STATIC VBOXSTRICTRC
6334iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6335{
6336 /** @todo Need a different PGM interface here. We're currently using
6337 * generic / REM interfaces. This won't cut it for R0 & RC. */
6338 RTGCPHYS GCPhys;
6339 uint64_t fFlags;
6340 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6341 if (RT_FAILURE(rc))
6342 {
6343 /** @todo Check unassigned memory in unpaged mode. */
6344 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6345 *pGCPhysMem = NIL_RTGCPHYS;
6346 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6347 }
6348
6349 /* If the page is writable and does not have the no-exec bit set, all
6350 access is allowed. Otherwise we'll have to check more carefully... */
6351 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6352 {
6353 /* Write to read only memory? */
6354 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6355 && !(fFlags & X86_PTE_RW)
6356 && ( pIemCpu->uCpl != 0
6357 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6358 {
6359 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6360 *pGCPhysMem = NIL_RTGCPHYS;
6361 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6362 }
6363
6364 /* Kernel memory accessed by userland? */
6365 if ( !(fFlags & X86_PTE_US)
6366 && pIemCpu->uCpl == 3
6367 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6368 {
6369 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6370 *pGCPhysMem = NIL_RTGCPHYS;
6371 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6372 }
6373
6374 /* Executing non-executable memory? */
6375 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6376 && (fFlags & X86_PTE_PAE_NX)
6377 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6378 {
6379 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6380 *pGCPhysMem = NIL_RTGCPHYS;
6381 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6382 VERR_ACCESS_DENIED);
6383 }
6384 }
6385
6386 /*
6387 * Set the dirty / access flags.
6388 * ASSUMES this is set when the address is translated rather than on commit...
6389 */
6390 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6391 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6392 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6393 {
6394 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6395 AssertRC(rc2);
6396 }
6397
6398 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6399 *pGCPhysMem = GCPhys;
6400 return VINF_SUCCESS;
6401}
6402
6403
6404
6405/**
6406 * Maps a physical page.
6407 *
6408 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6409 * @param pIemCpu The IEM per CPU data.
6410 * @param GCPhysMem The physical address.
6411 * @param fAccess The intended access.
6412 * @param ppvMem Where to return the mapping address.
6413 * @param pLock The PGM lock.
6414 */
6415IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6416{
6417#ifdef IEM_VERIFICATION_MODE_FULL
6418 /* Force the alternative path so we can ignore writes. */
6419 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6420 {
6421 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6422 {
6423 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6424 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6425 if (RT_FAILURE(rc2))
6426 pIemCpu->fProblematicMemory = true;
6427 }
6428 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6429 }
6430#endif
6431#ifdef IEM_LOG_MEMORY_WRITES
6432 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6433 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6434#endif
6435#ifdef IEM_VERIFICATION_MODE_MINIMAL
6436 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6437#endif
6438
6439 /** @todo This API may require some improvement later. A private deal with PGM
6440 * regarding locking and unlocking needs to be struck. A couple of TLBs
6441 * living in PGM, but with publicly accessible inlined access methods
6442 * could perhaps be an even better solution. */
6443 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6444 GCPhysMem,
6445 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6446 pIemCpu->fBypassHandlers,
6447 ppvMem,
6448 pLock);
6449 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6450 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6451
6452#ifdef IEM_VERIFICATION_MODE_FULL
6453 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6454 pIemCpu->fProblematicMemory = true;
6455#endif
6456 return rc;
6457}
6458
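/*
 * Usage sketch (simplified): iemMemPageMap and iemMemPageUnmap are used as a
 * pair, with the PGMPAGEMAPLOCK keeping the page around in between.  This is
 * in essence what iemMemMap and iemMemCommitAndUnmap do below, except that
 * they keep the lock in pIemCpu->aMemMappingLocks[] across the instruction:
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void          *pvMem;
 *     int rc = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, &pvMem, &Lock);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         ... access pvMem ...
 *         iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, pvMem, &Lock);
 *     }
 *
 * Any failure status (VERR_PGM_PHYS_TLB_CATCH_ALL and friends) is handled by
 * falling back on the bounce buffer path, see iemMemBounceBufferMapPhys.
 */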
6459
6460/**
6461 * Unmap a page previously mapped by iemMemPageMap.
6462 *
6463 * @param pIemCpu The IEM per CPU data.
6464 * @param GCPhysMem The physical address.
6465 * @param fAccess The intended access.
6466 * @param pvMem What iemMemPageMap returned.
6467 * @param pLock The PGM lock.
6468 */
6469DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6470{
6471 NOREF(pIemCpu);
6472 NOREF(GCPhysMem);
6473 NOREF(fAccess);
6474 NOREF(pvMem);
6475 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6476}
6477
6478
6479/**
6480 * Looks up a memory mapping entry.
6481 *
6482 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6483 * @param pIemCpu The IEM per CPU data.
6484 * @param pvMem The memory address.
6485 * @param fAccess The access flags (type and origin) to match.
6486 */
6487DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6488{
6489 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
6490 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6491 if ( pIemCpu->aMemMappings[0].pv == pvMem
6492 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6493 return 0;
6494 if ( pIemCpu->aMemMappings[1].pv == pvMem
6495 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6496 return 1;
6497 if ( pIemCpu->aMemMappings[2].pv == pvMem
6498 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6499 return 2;
6500 return VERR_NOT_FOUND;
6501}
6502
6503
6504/**
6505 * Finds a free memmap entry when the iNextMapping hint doesn't point at a free one.
6506 *
6507 * @returns Memory mapping index, 1024 on failure.
6508 * @param pIemCpu The IEM per CPU data.
6509 */
6510IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6511{
6512 /*
6513 * The easy case.
6514 */
6515 if (pIemCpu->cActiveMappings == 0)
6516 {
6517 pIemCpu->iNextMapping = 1;
6518 return 0;
6519 }
6520
6521 /* There should be enough mappings for all instructions. */
6522 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6523
6524 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6525 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6526 return i;
6527
6528 AssertFailedReturn(1024);
6529}
6530
6531
6532/**
6533 * Commits a bounce buffer that needs writing back and unmaps it.
6534 *
6535 * @returns Strict VBox status code.
6536 * @param pIemCpu The IEM per CPU data.
6537 * @param iMemMap The index of the buffer to commit.
6538 */
6539IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6540{
6541 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6542 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6543
6544 /*
6545 * Do the writing.
6546 */
6547#ifndef IEM_VERIFICATION_MODE_MINIMAL
6548 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6549 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6550 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6551 {
6552 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6553 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6554 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6555 if (!pIemCpu->fBypassHandlers)
6556 {
6557 /*
6558 * Carefully and efficiently dealing with access handler return
6559 * codes makes this a little bloated.
6560 */
6561 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6562 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6563 pbBuf,
6564 cbFirst,
6565 PGMACCESSORIGIN_IEM);
6566 if (rcStrict == VINF_SUCCESS)
6567 {
6568 if (cbSecond)
6569 {
6570 rcStrict = PGMPhysWrite(pVM,
6571 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6572 pbBuf + cbFirst,
6573 cbSecond,
6574 PGMACCESSORIGIN_IEM);
6575 if (rcStrict == VINF_SUCCESS)
6576 { /* nothing */ }
6577 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6578 {
6579 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6580 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6581 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6582 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6583 }
6584 else
6585 {
6586 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6587 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6588 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6589 return rcStrict;
6590 }
6591 }
6592 }
6593 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6594 {
6595 if (!cbSecond)
6596 {
6597 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6598 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6599 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6600 }
6601 else
6602 {
6603 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6604 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6605 pbBuf + cbFirst,
6606 cbSecond,
6607 PGMACCESSORIGIN_IEM);
6608 if (rcStrict2 == VINF_SUCCESS)
6609 {
6610 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6611 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6612 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6613 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6614 }
6615 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6616 {
6617 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6618 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6619 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6620 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6621 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6622 }
6623 else
6624 {
6625 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6626 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6627 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6628 return rcStrict2;
6629 }
6630 }
6631 }
6632 else
6633 {
6634 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6635 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6636 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6637 return rcStrict;
6638 }
6639 }
6640 else
6641 {
6642 /*
6643 * No access handlers, much simpler.
6644 */
6645 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6646 if (RT_SUCCESS(rc))
6647 {
6648 if (cbSecond)
6649 {
6650 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6651 if (RT_SUCCESS(rc))
6652 { /* likely */ }
6653 else
6654 {
6655 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6656 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6657 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6658 return rc;
6659 }
6660 }
6661 }
6662 else
6663 {
6664 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6665 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6666 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6667 return rc;
6668 }
6669 }
6670 }
6671#endif
6672
6673#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6674 /*
6675 * Record the write(s).
6676 */
6677 if (!pIemCpu->fNoRem)
6678 {
6679 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6680 if (pEvtRec)
6681 {
6682 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6683 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6684 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6685 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6686 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6687 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6688 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6689 }
6690 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6691 {
6692 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6693 if (pEvtRec)
6694 {
6695 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6696 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6697 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6698 memcpy(pEvtRec->u.RamWrite.ab,
6699 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6700 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6701 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6702 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6703 }
6704 }
6705 }
6706#endif
6707#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6708 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6709 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6710 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6711 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6712 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6713 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6714
6715 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6716 g_cbIemWrote = cbWrote;
6717 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6718#endif
6719
6720 /*
6721 * Free the mapping entry.
6722 */
6723 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6724 Assert(pIemCpu->cActiveMappings != 0);
6725 pIemCpu->cActiveMappings--;
6726 return VINF_SUCCESS;
6727}
6728
6729
6730/**
6731 * iemMemMap worker that deals with a request crossing pages.
6732 */
6733IEM_STATIC VBOXSTRICTRC
6734iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6735{
6736 /*
6737 * Do the address translations.
6738 */
6739 RTGCPHYS GCPhysFirst;
6740 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6741 if (rcStrict != VINF_SUCCESS)
6742 return rcStrict;
6743
6744 RTGCPHYS GCPhysSecond;
6745 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
6746 fAccess, &GCPhysSecond);
6747 if (rcStrict != VINF_SUCCESS)
6748 return rcStrict;
6749 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6750
6751 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6752#ifdef IEM_VERIFICATION_MODE_FULL
6753 /*
6754 * Detect problematic memory when verifying so we can select
6755 * the right execution engine. (TLB: Redo this.)
6756 */
6757 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6758 {
6759 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6760 if (RT_SUCCESS(rc2))
6761 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6762 if (RT_FAILURE(rc2))
6763 pIemCpu->fProblematicMemory = true;
6764 }
6765#endif
6766
6767
6768 /*
6769 * Read in the current memory content if it's a read, execute or partial
6770 * write access.
6771 */
6772 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6773 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6774 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
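    /* Worked example: cbMem = 4 at a guest address whose page offset is 0xffe
       gives cbFirstPage = 0x1000 - 0xffe = 2 and cbSecondPage = 4 - 2 = 2,
       i.e. the access straddles the boundary with two bytes on each side. */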
6775
6776 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6777 {
6778 if (!pIemCpu->fBypassHandlers)
6779 {
6780 /*
6781 * Must carefully deal with access handler status codes here,
6782 * which makes the code a bit bloated.
6783 */
6784 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6785 if (rcStrict == VINF_SUCCESS)
6786 {
6787 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6788 if (rcStrict == VINF_SUCCESS)
6789 { /*likely */ }
6790 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6791 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6792 else
6793 {
6794 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6795 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6796 return rcStrict;
6797 }
6798 }
6799 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6800 {
6801 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6802 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6803 {
6804 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6805 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6806 }
6807 else
6808 {
6809 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6810 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6811 return rcStrict2;
6812 }
6813 }
6814 else
6815 {
6816 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6817 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6818 return rcStrict;
6819 }
6820 }
6821 else
6822 {
6823 /*
6824 * No informational status codes here, much more straightforward.
6825 */
6826 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6827 if (RT_SUCCESS(rc))
6828 {
6829 Assert(rc == VINF_SUCCESS);
6830 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6831 if (RT_SUCCESS(rc))
6832 Assert(rc == VINF_SUCCESS);
6833 else
6834 {
6835 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6836 return rc;
6837 }
6838 }
6839 else
6840 {
6841 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6842 return rc;
6843 }
6844 }
6845
6846#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6847 if ( !pIemCpu->fNoRem
6848 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6849 {
6850 /*
6851 * Record the reads.
6852 */
6853 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6854 if (pEvtRec)
6855 {
6856 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6857 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6858 pEvtRec->u.RamRead.cb = cbFirstPage;
6859 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6860 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6861 }
6862 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6863 if (pEvtRec)
6864 {
6865 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6866 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6867 pEvtRec->u.RamRead.cb = cbSecondPage;
6868 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6869 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6870 }
6871 }
6872#endif
6873 }
6874#ifdef VBOX_STRICT
6875 else
6876 memset(pbBuf, 0xcc, cbMem);
6877 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6878 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6879#endif
6880
6881 /*
6882 * Commit the bounce buffer entry.
6883 */
6884 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6885 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6886 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6887 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6888 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6889 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6890 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6891 pIemCpu->iNextMapping = iMemMap + 1;
6892 pIemCpu->cActiveMappings++;
6893
6894 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6895 *ppvMem = pbBuf;
6896 return VINF_SUCCESS;
6897}
6898
6899
6900/**
6901 * iemMemMap worker that deals with iemMemPageMap failures.
6902 */
6903IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6904 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6905{
6906 /*
6907 * Filter out conditions we can handle and the ones which shouldn't happen.
6908 */
6909 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6910 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6911 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6912 {
6913 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6914 return rcMap;
6915 }
6916 pIemCpu->cPotentialExits++;
6917
6918 /*
6919 * Read in the current memory content if it's a read, execute or partial
6920 * write access.
6921 */
6922 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6923 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6924 {
6925 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6926 memset(pbBuf, 0xff, cbMem);
6927 else
6928 {
6929 int rc;
6930 if (!pIemCpu->fBypassHandlers)
6931 {
6932 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6933 if (rcStrict == VINF_SUCCESS)
6934 { /* nothing */ }
6935 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6936 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6937 else
6938 {
6939 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6940 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6941 return rcStrict;
6942 }
6943 }
6944 else
6945 {
6946 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6947 if (RT_SUCCESS(rc))
6948 { /* likely */ }
6949 else
6950 {
6951 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6952 GCPhysFirst, rc));
6953 return rc;
6954 }
6955 }
6956 }
6957
6958#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6959 if ( !pIemCpu->fNoRem
6960 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6961 {
6962 /*
6963 * Record the read.
6964 */
6965 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6966 if (pEvtRec)
6967 {
6968 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6969 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6970 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6971 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6972 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6973 }
6974 }
6975#endif
6976 }
6977#ifdef VBOX_STRICT
6978 else
6979 memset(pbBuf, 0xcc, cbMem);
6982 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6983 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6984#endif
6985
6986 /*
6987 * Commit the bounce buffer entry.
6988 */
6989 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6990 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6991 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6992 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6993 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6994 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6995 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6996 pIemCpu->iNextMapping = iMemMap + 1;
6997 pIemCpu->cActiveMappings++;
6998
6999 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7000 *ppvMem = pbBuf;
7001 return VINF_SUCCESS;
7002}
7003
7004
7005
7006/**
7007 * Maps the specified guest memory for the given kind of access.
7008 *
7009 * This may be using bounce buffering of the memory if it's crossing a page
7010 * boundary or if there is an access handler installed for any of it. Because
7011 * of lock prefix guarantees, we're in for some extra clutter when this
7012 * happens.
7013 *
7014 * This may raise a \#GP, \#SS, \#PF or \#AC.
7015 *
7016 * @returns VBox strict status code.
7017 *
7018 * @param pIemCpu The IEM per CPU data.
7019 * @param ppvMem Where to return the pointer to the mapped
7020 * memory.
7021 * @param cbMem The number of bytes to map. This is usually 1,
7022 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7023 * string operations it can be up to a page.
7024 * @param iSegReg The index of the segment register to use for
7025 * this access. The base and limits are checked.
7026 * Use UINT8_MAX to indicate that no segmentation
7027 * is required (for IDT, GDT and LDT accesses).
7028 * @param GCPtrMem The address of the guest memory.
7029 * @param fAccess How the memory is being accessed. The
7030 * IEM_ACCESS_TYPE_XXX bit is used to figure out
7031 * how to map the memory, while the
7032 * IEM_ACCESS_WHAT_XXX bit is used when raising
7033 * exceptions.
7034 */
7035IEM_STATIC VBOXSTRICTRC
7036iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
7037{
7038 /*
7039 * Check the input and figure out which mapping entry to use.
7040 */
7041 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7042 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
7043 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
7044
7045 unsigned iMemMap = pIemCpu->iNextMapping;
7046 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
7047 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7048 {
7049 iMemMap = iemMemMapFindFree(pIemCpu);
7050 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
7051 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
7052 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
7053 pIemCpu->aMemMappings[2].fAccess),
7054 VERR_IEM_IPE_9);
7055 }
7056
7057 /*
7058 * Map the memory, checking that we can actually access it. If something
7059 * slightly complicated happens, fall back on bounce buffering.
7060 */
7061 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7062 if (rcStrict != VINF_SUCCESS)
7063 return rcStrict;
7064
7065 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
7066 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
7067
7068 RTGCPHYS GCPhysFirst;
7069 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
7070 if (rcStrict != VINF_SUCCESS)
7071 return rcStrict;
7072
7073 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7074 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7075 if (fAccess & IEM_ACCESS_TYPE_READ)
7076 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7077
7078 void *pvMem;
7079 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7080 if (rcStrict != VINF_SUCCESS)
7081 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7082
7083 /*
7084 * Fill in the mapping table entry.
7085 */
7086 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7087 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7088 pIemCpu->iNextMapping = iMemMap + 1;
7089 pIemCpu->cActiveMappings++;
7090
7091 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7092 *ppvMem = pvMem;
7093 return VINF_SUCCESS;
7094}
7095
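/*
 * Usage sketch: the access helpers below all follow the same map / access /
 * commit-and-unmap pattern.  Fetching a word, for example, is essentially
 * this (cf. iemMemFetchDataU16, here with iSegReg = X86_SREG_DS):
 *
 *     uint16_t const *pu16Src;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_R);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint16_t const uValue = *pu16Src;
 *         rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 *     }
 *
 * For write mappings the commit call is what flushes a bounce buffered
 * mapping back to guest memory, so a write must not be considered done
 * until it has returned successfully.
 */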
7096
7097/**
7098 * Commits the guest memory if bounce buffered and unmaps it.
7099 *
7100 * @returns Strict VBox status code.
7101 * @param pIemCpu The IEM per CPU data.
7102 * @param pvMem The mapping.
7103 * @param fAccess The kind of access.
7104 */
7105IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7106{
7107 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7108 AssertReturn(iMemMap >= 0, iMemMap);
7109
7110 /* If it's bounce buffered, we may need to write back the buffer. */
7111 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7112 {
7113 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7114 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7115 }
7116 /* Otherwise unlock it. */
7117 else
7118 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7119
7120 /* Free the entry. */
7121 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7122 Assert(pIemCpu->cActiveMappings != 0);
7123 pIemCpu->cActiveMappings--;
7124 return VINF_SUCCESS;
7125}
7126
7127
7128/**
7129 * Rolls back mappings, releasing page locks and such.
7130 *
7131 * The caller shall only call this after checking cActiveMappings.
7132 *
7134 * @param pIemCpu The IEM per CPU data.
7135 */
7136IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7137{
7138 Assert(pIemCpu->cActiveMappings > 0);
7139
7140 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7141 while (iMemMap-- > 0)
7142 {
7143 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7144 if (fAccess != IEM_ACCESS_INVALID)
7145 {
7146 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7147 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7148 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7149 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7150 Assert(pIemCpu->cActiveMappings > 0);
7151 pIemCpu->cActiveMappings--;
7152 }
7153 }
7154}
7155
7156
7157/**
7158 * Fetches a data byte.
7159 *
7160 * @returns Strict VBox status code.
7161 * @param pIemCpu The IEM per CPU data.
7162 * @param pu8Dst Where to return the byte.
7163 * @param iSegReg The index of the segment register to use for
7164 * this access. The base and limits are checked.
7165 * @param GCPtrMem The address of the guest memory.
7166 */
7167IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7168{
7169 /* The lazy approach for now... */
7170 uint8_t const *pu8Src;
7171 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7172 if (rc == VINF_SUCCESS)
7173 {
7174 *pu8Dst = *pu8Src;
7175 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7176 }
7177 return rc;
7178}
7179
7180
7181/**
7182 * Fetches a data word.
7183 *
7184 * @returns Strict VBox status code.
7185 * @param pIemCpu The IEM per CPU data.
7186 * @param pu16Dst Where to return the word.
7187 * @param iSegReg The index of the segment register to use for
7188 * this access. The base and limits are checked.
7189 * @param GCPtrMem The address of the guest memory.
7190 */
7191IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7192{
7193 /* The lazy approach for now... */
7194 uint16_t const *pu16Src;
7195 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7196 if (rc == VINF_SUCCESS)
7197 {
7198 *pu16Dst = *pu16Src;
7199 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7200 }
7201 return rc;
7202}
7203
7204
7205/**
7206 * Fetches a data dword.
7207 *
7208 * @returns Strict VBox status code.
7209 * @param pIemCpu The IEM per CPU data.
7210 * @param pu32Dst Where to return the dword.
7211 * @param iSegReg The index of the segment register to use for
7212 * this access. The base and limits are checked.
7213 * @param GCPtrMem The address of the guest memory.
7214 */
7215IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7216{
7217 /* The lazy approach for now... */
7218 uint32_t const *pu32Src;
7219 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7220 if (rc == VINF_SUCCESS)
7221 {
7222 *pu32Dst = *pu32Src;
7223 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7224 }
7225 return rc;
7226}
7227
7228
7229#ifdef SOME_UNUSED_FUNCTION
7230/**
7231 * Fetches a data dword and sign extends it to a qword.
7232 *
7233 * @returns Strict VBox status code.
7234 * @param pIemCpu The IEM per CPU data.
7235 * @param pu64Dst Where to return the sign extended value.
7236 * @param iSegReg The index of the segment register to use for
7237 * this access. The base and limits are checked.
7238 * @param GCPtrMem The address of the guest memory.
7239 */
7240IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7241{
7242 /* The lazy approach for now... */
7243 int32_t const *pi32Src;
7244 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7245 if (rc == VINF_SUCCESS)
7246 {
7247 *pu64Dst = *pi32Src;
7248 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7249 }
7250#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7251 else
7252 *pu64Dst = 0;
7253#endif
7254 return rc;
7255}
7256#endif
7257
7258
7259/**
7260 * Fetches a data qword.
7261 *
7262 * @returns Strict VBox status code.
7263 * @param pIemCpu The IEM per CPU data.
7264 * @param pu64Dst Where to return the qword.
7265 * @param iSegReg The index of the segment register to use for
7266 * this access. The base and limits are checked.
7267 * @param GCPtrMem The address of the guest memory.
7268 */
7269IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7270{
7271 /* The lazy approach for now... */
7272 uint64_t const *pu64Src;
7273 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7274 if (rc == VINF_SUCCESS)
7275 {
7276 *pu64Dst = *pu64Src;
7277 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7278 }
7279 return rc;
7280}
7281
7282
7283/**
7284 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7285 *
7286 * @returns Strict VBox status code.
7287 * @param pIemCpu The IEM per CPU data.
7288 * @param pu64Dst Where to return the qword.
7289 * @param iSegReg The index of the segment register to use for
7290 * this access. The base and limits are checked.
7291 * @param GCPtrMem The address of the guest memory.
7292 */
7293IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7294{
7295 /* The lazy approach for now... */
7296 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7297 if (RT_UNLIKELY(GCPtrMem & 15))
7298 return iemRaiseGeneralProtectionFault0(pIemCpu);
7299
7300 uint64_t const *pu64Src;
7301 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7302 if (rc == VINF_SUCCESS)
7303 {
7304 *pu64Dst = *pu64Src;
7305 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7306 }
7307 return rc;
7308}
7309
7310
7311/**
7312 * Fetches a data tword.
7313 *
7314 * @returns Strict VBox status code.
7315 * @param pIemCpu The IEM per CPU data.
7316 * @param pr80Dst Where to return the tword.
7317 * @param iSegReg The index of the segment register to use for
7318 * this access. The base and limits are checked.
7319 * @param GCPtrMem The address of the guest memory.
7320 */
7321IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7322{
7323 /* The lazy approach for now... */
7324 PCRTFLOAT80U pr80Src;
7325 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7326 if (rc == VINF_SUCCESS)
7327 {
7328 *pr80Dst = *pr80Src;
7329 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7330 }
7331 return rc;
7332}
7333
7334
7335/**
7336 * Fetches a data dqword (double qword), generally SSE related.
7337 *
7338 * @returns Strict VBox status code.
7339 * @param pIemCpu The IEM per CPU data.
7340 * @param pu128Dst Where to return the dqword.
7341 * @param iSegReg The index of the segment register to use for
7342 * this access. The base and limits are checked.
7343 * @param GCPtrMem The address of the guest memory.
7344 */
7345IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7346{
7347 /* The lazy approach for now... */
7348 uint128_t const *pu128Src;
7349 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7350 if (rc == VINF_SUCCESS)
7351 {
7352 *pu128Dst = *pu128Src;
7353 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7354 }
7355 return rc;
7356}
7357
7358
7359/**
7360 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7361 * related.
7362 *
7363 * Raises \#GP(0) if not aligned.
7364 *
7365 * @returns Strict VBox status code.
7366 * @param pIemCpu The IEM per CPU data.
7367 * @param pu128Dst Where to return the dqword.
7368 * @param iSegReg The index of the segment register to use for
7369 * this access. The base and limits are checked.
7370 * @param GCPtrMem The address of the guest memory.
7371 */
7372IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7373{
7374 /* The lazy approach for now... */
7375 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7376 if ( (GCPtrMem & 15)
7377 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7378 return iemRaiseGeneralProtectionFault0(pIemCpu);
7379
7380 uint128_t const *pu128Src;
7381 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7382 if (rc == VINF_SUCCESS)
7383 {
7384 *pu128Dst = *pu128Src;
7385 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7386 }
7387 return rc;
7388}
7389
7390
7391
7392
7393/**
7394 * Fetches a descriptor register (lgdt, lidt).
7395 *
7396 * @returns Strict VBox status code.
7397 * @param pIemCpu The IEM per CPU data.
7398 * @param pcbLimit Where to return the limit.
7399 * @param pGCPtrBase Where to return the base.
7400 * @param iSegReg The index of the segment register to use for
7401 * this access. The base and limits are checked.
7402 * @param GCPtrMem The address of the guest memory.
7403 * @param enmOpSize The effective operand size.
7404 */
7405IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7406 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7407{
7408 /*
7409 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7410 * little special:
7411 * - The two reads are done separately.
7412 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7413 * - We suspect the 386 to actually commit the limit before the base in
7414 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7415 * don't try to emulate this eccentric behavior, because it's not well
7416 * enough understood and rather hard to trigger.
7417 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7418 */
7419 VBOXSTRICTRC rcStrict;
7420 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
7421 {
7422 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7423 if (rcStrict == VINF_SUCCESS)
7424 rcStrict = iemMemFetchDataU64(pIemCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7425 }
7426 else
7427 {
7428 uint32_t uTmp;
7429 if (enmOpSize == IEMMODE_32BIT)
7430 {
7431 if (IEM_GET_TARGET_CPU(pIemCpu) != IEMTARGETCPU_486)
7432 {
7433 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7434 if (rcStrict == VINF_SUCCESS)
7435 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7436 }
7437 else
7438 {
7439 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem);
7440 if (rcStrict == VINF_SUCCESS)
7441 {
7442 *pcbLimit = (uint16_t)uTmp;
7443 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7444 }
7445 }
7446 if (rcStrict == VINF_SUCCESS)
7447 *pGCPtrBase = uTmp;
7448 }
7449 else
7450 {
7451 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7452 if (rcStrict == VINF_SUCCESS)
7453 {
7454 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7455 if (rcStrict == VINF_SUCCESS)
7456 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7457 }
7458 }
7459 }
7460 return rcStrict;
7461}
7462
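/*
 * What the above boils down to for a descriptor table operand at GCPtrMem:
 *   - 64-bit mode:          limit = word at +0, base = qword at +2.
 *   - 32-bit operand size:  limit = word at +0, base = dword at +2
 *                           (the 486 variant reads the limit as a dword and
 *                           truncates it to 16 bits).
 *   - 16-bit operand size:  limit = word at +0, base = dword at +2 masked
 *                           with 0x00ffffff, i.e. only a 24-bit base is used.
 */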
7463
7464
7465/**
7466 * Stores a data byte.
7467 *
7468 * @returns Strict VBox status code.
7469 * @param pIemCpu The IEM per CPU data.
7470 * @param iSegReg The index of the segment register to use for
7471 * this access. The base and limits are checked.
7472 * @param GCPtrMem The address of the guest memory.
7473 * @param u8Value The value to store.
7474 */
7475IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7476{
7477 /* The lazy approach for now... */
7478 uint8_t *pu8Dst;
7479 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7480 if (rc == VINF_SUCCESS)
7481 {
7482 *pu8Dst = u8Value;
7483 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7484 }
7485 return rc;
7486}
7487
7488
7489/**
7490 * Stores a data word.
7491 *
7492 * @returns Strict VBox status code.
7493 * @param pIemCpu The IEM per CPU data.
7494 * @param iSegReg The index of the segment register to use for
7495 * this access. The base and limits are checked.
7496 * @param GCPtrMem The address of the guest memory.
7497 * @param u16Value The value to store.
7498 */
7499IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7500{
7501 /* The lazy approach for now... */
7502 uint16_t *pu16Dst;
7503 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7504 if (rc == VINF_SUCCESS)
7505 {
7506 *pu16Dst = u16Value;
7507 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7508 }
7509 return rc;
7510}
7511
7512
7513/**
7514 * Stores a data dword.
7515 *
7516 * @returns Strict VBox status code.
7517 * @param pIemCpu The IEM per CPU data.
7518 * @param iSegReg The index of the segment register to use for
7519 * this access. The base and limits are checked.
7520 * @param GCPtrMem The address of the guest memory.
7521 * @param u32Value The value to store.
7522 */
7523IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7524{
7525 /* The lazy approach for now... */
7526 uint32_t *pu32Dst;
7527 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7528 if (rc == VINF_SUCCESS)
7529 {
7530 *pu32Dst = u32Value;
7531 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7532 }
7533 return rc;
7534}
7535
7536
7537/**
7538 * Stores a data qword.
7539 *
7540 * @returns Strict VBox status code.
7541 * @param pIemCpu The IEM per CPU data.
7542 * @param iSegReg The index of the segment register to use for
7543 * this access. The base and limits are checked.
7544 * @param GCPtrMem The address of the guest memory.
7545 * @param u64Value The value to store.
7546 */
7547IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7548{
7549 /* The lazy approach for now... */
7550 uint64_t *pu64Dst;
7551 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7552 if (rc == VINF_SUCCESS)
7553 {
7554 *pu64Dst = u64Value;
7555 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7556 }
7557 return rc;
7558}
7559
7560
7561/**
7562 * Stores a data dqword.
7563 *
7564 * @returns Strict VBox status code.
7565 * @param pIemCpu The IEM per CPU data.
7566 * @param iSegReg The index of the segment register to use for
7567 * this access. The base and limits are checked.
7568 * @param GCPtrMem The address of the guest memory.
7569 * @param u128Value The value to store.
7570 */
7571IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7572{
7573 /* The lazy approach for now... */
7574 uint128_t *pu128Dst;
7575 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7576 if (rc == VINF_SUCCESS)
7577 {
7578 *pu128Dst = u128Value;
7579 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7580 }
7581 return rc;
7582}
7583
7584
7585/**
7586 * Stores a data dqword, SSE aligned.
7587 *
7588 * @returns Strict VBox status code.
7589 * @param pIemCpu The IEM per CPU data.
7590 * @param iSegReg The index of the segment register to use for
7591 * this access. The base and limits are checked.
7592 * @param GCPtrMem The address of the guest memory.
7593 * @param u128Value The value to store.
7594 */
7595IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7596{
7597 /* The lazy approach for now... */
7598 if ( (GCPtrMem & 15)
7599 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7600 return iemRaiseGeneralProtectionFault0(pIemCpu);
7601
7602 uint128_t *pu128Dst;
7603 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7604 if (rc == VINF_SUCCESS)
7605 {
7606 *pu128Dst = u128Value;
7607 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7608 }
7609 return rc;
7610}
7611
7612
7613/**
7614 * Stores a descriptor register (sgdt, sidt).
7615 *
7616 * @returns Strict VBox status code.
7617 * @param pIemCpu The IEM per CPU data.
7618 * @param cbLimit The limit.
7619 * @param GCPtrBase The base address.
7620 * @param iSegReg The index of the segment register to use for
7621 * this access. The base and limits are checked.
7622 * @param GCPtrMem The address of the guest memory.
7623 */
7624IEM_STATIC VBOXSTRICTRC
7625iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
7626{
7627 /*
7628 * The SIDT and SGDT instructions actually store the data using two
7629 * independent writes. The instructions do not respond to operand size prefixes.
7630 */
7631 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pIemCpu, iSegReg, GCPtrMem, cbLimit);
7632 if (rcStrict == VINF_SUCCESS)
7633 {
7634 if (pIemCpu->enmCpuMode == IEMMODE_16BIT)
7635 rcStrict = iemMemStoreDataU32(pIemCpu, iSegReg, GCPtrMem + 2,
7636 IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_286
7637 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7638 else if (pIemCpu->enmCpuMode == IEMMODE_32BIT)
7639 rcStrict = iemMemStoreDataU32(pIemCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7640 else
7641 rcStrict = iemMemStoreDataU64(pIemCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7642 }
7643 return rcStrict;
7644}
7645
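/*
 * The resulting store layout: the limit word always goes to GCPtrMem, and at
 * GCPtrMem+2 follows
 *   - a qword base in 64-bit mode,
 *   - a dword base in 32-bit mode, and
 *   - a dword base in 16-bit mode, where CPU targets up to and including the
 *     286 get the top byte forced to 0xff (those CPUs only had a 24-bit base).
 */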
7646
7647/**
7648 * Pushes a word onto the stack.
7649 *
7650 * @returns Strict VBox status code.
7651 * @param pIemCpu The IEM per CPU data.
7652 * @param u16Value The value to push.
7653 */
7654IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7655{
7656 /* Decrement the stack pointer. */
7657 uint64_t uNewRsp;
7658 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7659 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7660
7661 /* Write the word the lazy way. */
7662 uint16_t *pu16Dst;
7663 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7664 if (rc == VINF_SUCCESS)
7665 {
7666 *pu16Dst = u16Value;
7667 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7668 }
7669
7670 /* Commit the new RSP value unless an access handler made trouble. */
7671 if (rc == VINF_SUCCESS)
7672 pCtx->rsp = uNewRsp;
7673
7674 return rc;
7675}
7676
7677
7678/**
7679 * Pushes a dword onto the stack.
7680 *
7681 * @returns Strict VBox status code.
7682 * @param pIemCpu The IEM per CPU data.
7683 * @param u32Value The value to push.
7684 */
7685IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7686{
7687 /* Decrement the stack pointer. */
7688 uint64_t uNewRsp;
7689 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7690 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7691
7692 /* Write the dword the lazy way. */
7693 uint32_t *pu32Dst;
7694 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7695 if (rc == VINF_SUCCESS)
7696 {
7697 *pu32Dst = u32Value;
7698 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7699 }
7700
7701 /* Commit the new RSP value unless an access handler made trouble. */
7702 if (rc == VINF_SUCCESS)
7703 pCtx->rsp = uNewRsp;
7704
7705 return rc;
7706}
7707
7708
7709/**
7710 * Pushes a dword segment register value onto the stack.
7711 *
7712 * @returns Strict VBox status code.
7713 * @param pIemCpu The IEM per CPU data.
7714 * @param u32Value The value to push.
7715 */
7716IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7717{
7718 /* Decrement the stack pointer. */
7719 uint64_t uNewRsp;
7720 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7721 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7722
7723 VBOXSTRICTRC rc;
7724 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7725 {
7726 /* The recompiler writes a full dword. */
7727 uint32_t *pu32Dst;
7728 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7729 if (rc == VINF_SUCCESS)
7730 {
7731 *pu32Dst = u32Value;
7732 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7733 }
7734 }
7735 else
7736 {
7737 /* The intel docs talk about zero extending the selector register
7738 value. My actual intel CPU here might be zero extending the value,
7739 but it still only writes the lower word... */
7740 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7741 * happens when crossing a page boundary: is the high word checked
7742 * for write accessibility or not? Probably it is. What about segment limits?
7743 * It appears this behavior is also shared with trap error codes.
7744 *
7745 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
7746 * ancient hardware when it actually did change. */
7747 uint16_t *pu16Dst;
7748 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7749 if (rc == VINF_SUCCESS)
7750 {
7751 *pu16Dst = (uint16_t)u32Value;
7752 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7753 }
7754 }
7755
7756 /* Commit the new RSP value unless an access handler made trouble. */
7757 if (rc == VINF_SUCCESS)
7758 pCtx->rsp = uNewRsp;
7759
7760 return rc;
7761}
7762
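/*
 * Worked example of the quirk above: a "push es" with 32-bit operand size in
 * the non-verification path moves RSP down by four and maps those four bytes
 * read-write, but only assigns the low word.  The upper two bytes of the new
 * stack slot therefore keep whatever they contained before the push, whereas
 * the full verification path writes the whole zero extended dword like the
 * recompiler does.
 */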
7763
7764/**
7765 * Pushes a qword onto the stack.
7766 *
7767 * @returns Strict VBox status code.
7768 * @param pIemCpu The IEM per CPU data.
7769 * @param u64Value The value to push.
7770 */
7771IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7772{
7773 /* Decrement the stack pointer. */
7774 uint64_t uNewRsp;
7775 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7776 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7777
7778 /* Write the qword the lazy way. */
7779 uint64_t *pu64Dst;
7780 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7781 if (rc == VINF_SUCCESS)
7782 {
7783 *pu64Dst = u64Value;
7784 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7785 }
7786
7787 /* Commit the new RSP value unless an access handler made trouble. */
7788 if (rc == VINF_SUCCESS)
7789 pCtx->rsp = uNewRsp;
7790
7791 return rc;
7792}
7793
7794
7795/**
7796 * Pops a word from the stack.
7797 *
7798 * @returns Strict VBox status code.
7799 * @param pIemCpu The IEM per CPU data.
7800 * @param pu16Value Where to store the popped value.
7801 */
7802IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7803{
7804 /* Increment the stack pointer. */
7805 uint64_t uNewRsp;
7806 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7807 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7808
7809 /* Read the word the lazy way. */
7810 uint16_t const *pu16Src;
7811 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7812 if (rc == VINF_SUCCESS)
7813 {
7814 *pu16Value = *pu16Src;
7815 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7816
7817 /* Commit the new RSP value. */
7818 if (rc == VINF_SUCCESS)
7819 pCtx->rsp = uNewRsp;
7820 }
7821
7822 return rc;
7823}
7824
7825
7826/**
7827 * Pops a dword from the stack.
7828 *
7829 * @returns Strict VBox status code.
7830 * @param pIemCpu The IEM per CPU data.
7831 * @param pu32Value Where to store the popped value.
7832 */
7833IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7834{
7835 /* Increment the stack pointer. */
7836 uint64_t uNewRsp;
7837 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7838 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7839
7840 /* Read the dword the lazy way. */
7841 uint32_t const *pu32Src;
7842 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7843 if (rc == VINF_SUCCESS)
7844 {
7845 *pu32Value = *pu32Src;
7846 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7847
7848 /* Commit the new RSP value. */
7849 if (rc == VINF_SUCCESS)
7850 pCtx->rsp = uNewRsp;
7851 }
7852
7853 return rc;
7854}
7855
7856
7857/**
7858 * Pops a qword from the stack.
7859 *
7860 * @returns Strict VBox status code.
7861 * @param pIemCpu The IEM per CPU data.
7862 * @param pu64Value Where to store the popped value.
7863 */
7864IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7865{
7866 /* Increment the stack pointer. */
7867 uint64_t uNewRsp;
7868 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7869 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7870
7871 /* Read the qword the lazy way. */
7872 uint64_t const *pu64Src;
7873 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7874 if (rc == VINF_SUCCESS)
7875 {
7876 *pu64Value = *pu64Src;
7877 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7878
7879 /* Commit the new RSP value. */
7880 if (rc == VINF_SUCCESS)
7881 pCtx->rsp = uNewRsp;
7882 }
7883
7884 return rc;
7885}
7886
7887
7888/**
7889 * Pushes a word onto the stack, using a temporary stack pointer.
7890 *
7891 * @returns Strict VBox status code.
7892 * @param pIemCpu The IEM per CPU data.
7893 * @param u16Value The value to push.
7894 * @param pTmpRsp Pointer to the temporary stack pointer.
7895 */
7896IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7897{
7898 /* Decrement the stack pointer. */
7899 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7900 RTUINT64U NewRsp = *pTmpRsp;
7901 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7902
7903 /* Write the word the lazy way. */
7904 uint16_t *pu16Dst;
7905 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7906 if (rc == VINF_SUCCESS)
7907 {
7908 *pu16Dst = u16Value;
7909 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7910 }
7911
7912 /* Commit the new RSP value unless an access handler made trouble. */
7913 if (rc == VINF_SUCCESS)
7914 *pTmpRsp = NewRsp;
7915
7916 return rc;
7917}
7918
7919
7920/**
7921 * Pushes a dword onto the stack, using a temporary stack pointer.
7922 *
7923 * @returns Strict VBox status code.
7924 * @param pIemCpu The IEM per CPU data.
7925 * @param u32Value The value to push.
7926 * @param pTmpRsp Pointer to the temporary stack pointer.
7927 */
7928IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7929{
7930 /* Decrement the stack pointer. */
7931 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7932 RTUINT64U NewRsp = *pTmpRsp;
7933 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7934
7935 /* Write the dword the lazy way. */
7936 uint32_t *pu32Dst;
7937 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7938 if (rc == VINF_SUCCESS)
7939 {
7940 *pu32Dst = u32Value;
7941 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7942 }
7943
7944 /* Commit the new RSP value unless an access handler made trouble. */
7945 if (rc == VINF_SUCCESS)
7946 *pTmpRsp = NewRsp;
7947
7948 return rc;
7949}
7950
7951
7952/**
7953 * Pushes a qword onto the stack, using a temporary stack pointer.
7954 *
7955 * @returns Strict VBox status code.
7956 * @param pIemCpu The IEM per CPU data.
7957 * @param u64Value The value to push.
7958 * @param pTmpRsp Pointer to the temporary stack pointer.
7959 */
7960IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7961{
7962 /* Decrement the stack pointer. */
7963 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7964 RTUINT64U NewRsp = *pTmpRsp;
7965 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7966
7967 /* Write the qword the lazy way. */
7968 uint64_t *pu64Dst;
7969 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7970 if (rc == VINF_SUCCESS)
7971 {
7972 *pu64Dst = u64Value;
7973 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7974 }
7975
7976 /* Commit the new RSP value unless an access handler made trouble. */
7977 if (rc == VINF_SUCCESS)
7978 *pTmpRsp = NewRsp;
7979
7980 return rc;
7981}
7982
7983
7984/**
7985 * Pops a word from the stack, using a temporary stack pointer.
7986 *
7987 * @returns Strict VBox status code.
7988 * @param pIemCpu The IEM per CPU data.
7989 * @param pu16Value Where to store the popped value.
7990 * @param pTmpRsp Pointer to the temporary stack pointer.
7991 */
7992IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7993{
7994 /* Increment the stack pointer. */
7995 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7996 RTUINT64U NewRsp = *pTmpRsp;
7997 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7998
7999 /* Read the word the lazy way. */
8000 uint16_t const *pu16Src;
8001 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8002 if (rc == VINF_SUCCESS)
8003 {
8004 *pu16Value = *pu16Src;
8005 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8006
8007 /* Commit the new RSP value. */
8008 if (rc == VINF_SUCCESS)
8009 *pTmpRsp = NewRsp;
8010 }
8011
8012 return rc;
8013}
8014
8015
8016/**
8017 * Pops a dword from the stack, using a temporary stack pointer.
8018 *
8019 * @returns Strict VBox status code.
8020 * @param pIemCpu The IEM per CPU data.
8021 * @param pu32Value Where to store the popped value.
8022 * @param pTmpRsp Pointer to the temporary stack pointer.
8023 */
8024IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
8025{
8026 /* Increment the stack pointer. */
8027 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8028 RTUINT64U NewRsp = *pTmpRsp;
8029 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
8030
8031 /* Read the dword the lazy way. */
8032 uint32_t const *pu32Src;
8033 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8034 if (rc == VINF_SUCCESS)
8035 {
8036 *pu32Value = *pu32Src;
8037 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8038
8039 /* Commit the new RSP value. */
8040 if (rc == VINF_SUCCESS)
8041 *pTmpRsp = NewRsp;
8042 }
8043
8044 return rc;
8045}
8046
8047
8048/**
8049 * Pops a qword from the stack, using a temporary stack pointer.
8050 *
8051 * @returns Strict VBox status code.
8052 * @param pIemCpu The IEM per CPU data.
8053 * @param pu64Value Where to store the popped value.
8054 * @param pTmpRsp Pointer to the temporary stack pointer.
8055 */
8056IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
8057{
8058 /* Increment the stack pointer. */
8059 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8060 RTUINT64U NewRsp = *pTmpRsp;
8061 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8062
8063 /* Read the qword the lazy way. */
8064 uint64_t const *pu64Src;
8065 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8066 if (rcStrict == VINF_SUCCESS)
8067 {
8068 *pu64Value = *pu64Src;
8069 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8070
8071 /* Commit the new RSP value. */
8072 if (rcStrict == VINF_SUCCESS)
8073 *pTmpRsp = NewRsp;
8074 }
8075
8076 return rcStrict;
8077}
8078
8079
8080/**
8081 * Begin a special stack push (used by interrupts, exceptions and such).
8082 *
8083 * This will raise \#SS or \#PF if appropriate.
8084 *
8085 * @returns Strict VBox status code.
8086 * @param pIemCpu The IEM per CPU data.
8087 * @param cbMem The number of bytes to push onto the stack.
8088 * @param ppvMem Where to return the pointer to the stack memory.
8089 * As with the other memory functions this could be
8090 * direct access or bounce buffered access, so
8091 * don't commit any register changes until the commit call
8092 * succeeds.
8093 * @param puNewRsp Where to return the new RSP value. This must be
8094 * passed unchanged to
8095 * iemMemStackPushCommitSpecial().
8096 */
8097IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8098{
8099 Assert(cbMem < UINT8_MAX);
8100 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8101 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8102 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8103}
8104
8105
8106/**
8107 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8108 *
8109 * This will update the rSP.
8110 *
8111 * @returns Strict VBox status code.
8112 * @param pIemCpu The IEM per CPU data.
8113 * @param pvMem The pointer returned by
8114 * iemMemStackPushBeginSpecial().
8115 * @param uNewRsp The new RSP value returned by
8116 * iemMemStackPushBeginSpecial().
8117 */
8118IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8119{
8120 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8121 if (rcStrict == VINF_SUCCESS)
8122 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8123 return rcStrict;
8124}
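
/*
 * Illustrative usage sketch (not part of the original sources): the begin/commit
 * pair above is meant to be used roughly like this when building an exception or
 * interrupt stack frame. The local variable names and the 3-word frame layout are
 * assumptions made for the example only.
 *
 *     uint64_t     uNewRsp;
 *     uint16_t    *pu16Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 3 * sizeof(uint16_t),
 *                                                         (void **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pu16Frame[2] = (uint16_t)pCtx->eflags.u;   // flags
 *     pu16Frame[1] = pCtx->cs.Sel;               // return CS
 *     pu16Frame[0] = pCtx->ip;                   // return IP
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */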
8125
8126
8127/**
8128 * Begin a special stack pop (used by iret, retf and such).
8129 *
8130 * This will raise \#SS or \#PF if appropriate.
8131 *
8132 * @returns Strict VBox status code.
8133 * @param pIemCpu The IEM per CPU data.
8134 * @param cbMem The number of bytes to pop from the stack.
8135 * @param ppvMem Where to return the pointer to the stack memory.
8136 * @param puNewRsp Where to return the new RSP value. This must be
8137 * passed unchanged to
8138 * iemMemStackPopCommitSpecial() or applied
8139 * manually if iemMemStackPopDoneSpecial() is used.
8140 */
8141IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8142{
8143 Assert(cbMem < UINT8_MAX);
8144 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8145 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8146 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8147}
8148
8149
8150/**
8151 * Continue a special stack pop (used by iret and retf).
8152 *
8153 * This will raise \#SS or \#PF if appropriate.
8154 *
8155 * @returns Strict VBox status code.
8156 * @param pIemCpu The IEM per CPU data.
8157 * @param cbMem The number of bytes to pop from the stack.
8158 * @param ppvMem Where to return the pointer to the stack memory.
8159 * @param puNewRsp Where to return the new RSP value. This must be
8160 * passed unchanged to
8161 * iemMemStackPopCommitSpecial() or applied
8162 * manually if iemMemStackPopDoneSpecial() is used.
8163 */
8164IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8165{
8166 Assert(cbMem < UINT8_MAX);
8167 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8168 RTUINT64U NewRsp;
8169 NewRsp.u = *puNewRsp;
8170 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8171 *puNewRsp = NewRsp.u;
8172 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8173}
8174
8175
8176/**
8177 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8178 *
8179 * This will update the rSP.
8180 *
8181 * @returns Strict VBox status code.
8182 * @param pIemCpu The IEM per CPU data.
8183 * @param pvMem The pointer returned by
8184 * iemMemStackPopBeginSpecial().
8185 * @param uNewRsp The new RSP value returned by
8186 * iemMemStackPopBeginSpecial().
8187 */
8188IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8189{
8190 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8191 if (rcStrict == VINF_SUCCESS)
8192 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8193 return rcStrict;
8194}
8195
8196
8197/**
8198 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8199 * iemMemStackPopContinueSpecial).
8200 *
8201 * The caller will manually commit the rSP.
8202 *
8203 * @returns Strict VBox status code.
8204 * @param pIemCpu The IEM per CPU data.
8205 * @param pvMem The pointer returned by
8206 * iemMemStackPopBeginSpecial() or
8207 * iemMemStackPopContinueSpecial().
8208 */
8209IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8210{
8211 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8212}
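
/*
 * Illustrative usage sketch (not part of the original sources) of the special pop
 * protocol: begin maps the first chunk, done unmaps it, and the caller commits the
 * returned RSP manually once all checks have passed. The variable names and the
 * 3-dword frame layout are assumptions made for the example only.
 *
 *     uint64_t        uNewRsp;
 *     uint32_t const *pu32Frame;
 *     VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 3 * sizeof(uint32_t),
 *                                                           (void const **)&pu32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint32_t const uNewEip   = pu32Frame[0];
 *     uint32_t const uNewCs    = pu32Frame[1];
 *     uint32_t const uNewFlags = pu32Frame[2];
 *     rcStrict = iemMemStackPopDoneSpecial(pIemCpu, (void const *)pu32Frame);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... validate uNewCs/uNewFlags, load the new state, then store uNewRsp into pCtx->rsp ...
 */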
8213
8214
8215/**
8216 * Fetches a system table byte.
8217 *
8218 * @returns Strict VBox status code.
8219 * @param pIemCpu The IEM per CPU data.
8220 * @param pbDst Where to return the byte.
8221 * @param iSegReg The index of the segment register to use for
8222 * this access. The base and limits are checked.
8223 * @param GCPtrMem The address of the guest memory.
8224 */
8225IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8226{
8227 /* The lazy approach for now... */
8228 uint8_t const *pbSrc;
8229 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8230 if (rc == VINF_SUCCESS)
8231 {
8232 *pbDst = *pbSrc;
8233 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8234 }
8235 return rc;
8236}
8237
8238
8239/**
8240 * Fetches a system table word.
8241 *
8242 * @returns Strict VBox status code.
8243 * @param pIemCpu The IEM per CPU data.
8244 * @param pu16Dst Where to return the word.
8245 * @param iSegReg The index of the segment register to use for
8246 * this access. The base and limits are checked.
8247 * @param GCPtrMem The address of the guest memory.
8248 */
8249IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8250{
8251 /* The lazy approach for now... */
8252 uint16_t const *pu16Src;
8253 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8254 if (rc == VINF_SUCCESS)
8255 {
8256 *pu16Dst = *pu16Src;
8257 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8258 }
8259 return rc;
8260}
8261
8262
8263/**
8264 * Fetches a system table dword.
8265 *
8266 * @returns Strict VBox status code.
8267 * @param pIemCpu The IEM per CPU data.
8268 * @param pu32Dst Where to return the dword.
8269 * @param iSegReg The index of the segment register to use for
8270 * this access. The base and limits are checked.
8271 * @param GCPtrMem The address of the guest memory.
8272 */
8273IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8274{
8275 /* The lazy approach for now... */
8276 uint32_t const *pu32Src;
8277 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8278 if (rc == VINF_SUCCESS)
8279 {
8280 *pu32Dst = *pu32Src;
8281 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8282 }
8283 return rc;
8284}
8285
8286
8287/**
8288 * Fetches a system table qword.
8289 *
8290 * @returns Strict VBox status code.
8291 * @param pIemCpu The IEM per CPU data.
8292 * @param pu64Dst Where to return the qword.
8293 * @param iSegReg The index of the segment register to use for
8294 * this access. The base and limits are checked.
8295 * @param GCPtrMem The address of the guest memory.
8296 */
8297IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8298{
8299 /* The lazy approach for now... */
8300 uint64_t const *pu64Src;
8301 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8302 if (rc == VINF_SUCCESS)
8303 {
8304 *pu64Dst = *pu64Src;
8305 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8306 }
8307 return rc;
8308}
8309
8310
8311/**
8312 * Fetches a descriptor table entry with caller specified error code.
8313 *
8314 * @returns Strict VBox status code.
8315 * @param pIemCpu The IEM per CPU.
8316 * @param pDesc Where to return the descriptor table entry.
8317 * @param uSel The selector which table entry to fetch.
8318 * @param uXcpt The exception to raise on table lookup error.
8319 * @param uErrorCode The error code associated with the exception.
8320 */
8321IEM_STATIC VBOXSTRICTRC
8322iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8323{
8324 AssertPtr(pDesc);
8325 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8326
8327 /** @todo did the 286 require all 8 bytes to be accessible? */
8328 /*
8329 * Get the selector table base and check bounds.
8330 */
8331 RTGCPTR GCPtrBase;
8332 if (uSel & X86_SEL_LDT)
8333 {
8334 if ( !pCtx->ldtr.Attr.n.u1Present
8335 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8336 {
8337 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8338 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8339 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8340 uErrorCode, 0);
8341 }
8342
8343 Assert(pCtx->ldtr.Attr.n.u1Present);
8344 GCPtrBase = pCtx->ldtr.u64Base;
8345 }
8346 else
8347 {
8348 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8349 {
8350 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8351 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8352 uErrorCode, 0);
8353 }
8354 GCPtrBase = pCtx->gdtr.pGdt;
8355 }
8356
8357 /*
8358 * Read the legacy descriptor and maybe the long mode extensions if
8359 * required.
8360 */
8361 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8362 if (rcStrict == VINF_SUCCESS)
8363 {
8364 if ( !IEM_IS_LONG_MODE(pIemCpu)
8365 || pDesc->Legacy.Gen.u1DescType)
8366 pDesc->Long.au64[1] = 0;
8367 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8368 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8369 else
8370 {
8371 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8372 /** @todo is this the right exception? */
8373 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8374 }
8375 }
8376 return rcStrict;
8377}
8378
8379
8380/**
8381 * Fetches a descriptor table entry.
8382 *
8383 * @returns Strict VBox status code.
8384 * @param pIemCpu The IEM per CPU.
8385 * @param pDesc Where to return the descriptor table entry.
8386 * @param uSel The selector which table entry to fetch.
8387 * @param uXcpt The exception to raise on table lookup error.
8388 */
8389IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8390{
8391 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8392}
8393
8394
8395/**
8396 * Fakes a long mode stack selector for SS = 0.
8397 *
8398 * @param pDescSs Where to return the fake stack descriptor.
8399 * @param uDpl The DPL we want.
8400 */
8401IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8402{
8403 pDescSs->Long.au64[0] = 0;
8404 pDescSs->Long.au64[1] = 0;
8405 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8406 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8407 pDescSs->Long.Gen.u2Dpl = uDpl;
8408 pDescSs->Long.Gen.u1Present = 1;
8409 pDescSs->Long.Gen.u1Long = 1;
8410}
8411
8412
8413/**
8414 * Marks the selector descriptor as accessed (only non-system descriptors).
8415 *
8416 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8417 * will therefore skip the limit checks.
8418 *
8419 * @returns Strict VBox status code.
8420 * @param pIemCpu The IEM per CPU.
8421 * @param uSel The selector.
8422 */
8423IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8424{
8425 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8426
8427 /*
8428 * Get the selector table base and calculate the entry address.
8429 */
8430 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8431 ? pCtx->ldtr.u64Base
8432 : pCtx->gdtr.pGdt;
8433 GCPtr += uSel & X86_SEL_MASK;
8434
8435 /*
8436 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8437 * ugly stuff to avoid this. This will make sure it's an atomic access
8438 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8439 */
8440 VBOXSTRICTRC rcStrict;
8441 uint32_t volatile *pu32;
8442 if ((GCPtr & 3) == 0)
8443 {
8444 /* The normal case, map the 32 bits surrounding the accessed bit (bit 40). */
8445 GCPtr += 2 + 2;
8446 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8447 if (rcStrict != VINF_SUCCESS)
8448 return rcStrict;
8449 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8450 }
8451 else
8452 {
8453 /* The misaligned GDT/LDT case, map the whole thing. */
8454 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8455 if (rcStrict != VINF_SUCCESS)
8456 return rcStrict;
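        /* Re-anchor the pointer on a byte boundary so the bit index below still lands on
           the accessed bit, i.e. bit 40 of the descriptor (byte 5, bit 0), while keeping
           the underlying 32-bit access aligned. */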
8457 switch ((uintptr_t)pu32 & 3)
8458 {
8459 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8460 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8461 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8462 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8463 }
8464 }
8465
8466 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8467}
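
/*
 * Illustrative sketch (not taken from this file) of how the descriptor fetch and the
 * accessed-bit helper above are typically combined when loading a data segment
 * register; the surrounding presence/type/DPL validation is intentionally omitted.
 *
 *     IEMSELDESC   Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... check the present bit, descriptor type and DPL here ...
 *     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *     {
 *         rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *         Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 *     }
 */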
8468
8469/** @} */
8470
8471
8472/*
8473 * Include the C/C++ implementation of instruction.
8474 */
8475#include "IEMAllCImpl.cpp.h"
8476
8477
8478
8479/** @name "Microcode" macros.
8480 *
8481 * The idea is that we should be able to use the same code to interpret
8482 * instructions as well as recompiler instructions. Thus this obfuscation.
8483 *
8484 * @{
8485 */
8486#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8487#define IEM_MC_END() }
8488#define IEM_MC_PAUSE() do {} while (0)
8489#define IEM_MC_CONTINUE() do {} while (0)
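
/*
 * Schematic example (not taken from this file) of how the IEM_MC_* microcode
 * statements below compose inside an opcode decoder function; the register-form
 * ADD Gv,Ev body shown is an illustration only and the ModR/M extraction
 * expressions are simplified assumptions (no REX handling).
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_ARG(uint16_t *, pu16Dst,  0);
 *     IEM_MC_ARG(uint16_t,   u16Src,   1);
 *     IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *     IEM_MC_REF_GREG_U16(pu16Dst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
 *     IEM_MC_FETCH_GREG_U16(u16Src, bRm & X86_MODRM_RM_MASK);
 *     IEM_MC_REF_EFLAGS(pEFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */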
8490
8491/** Internal macro. */
8492#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8493 do \
8494 { \
8495 VBOXSTRICTRC rcStrict2 = a_Expr; \
8496 if (rcStrict2 != VINF_SUCCESS) \
8497 return rcStrict2; \
8498 } while (0)
8499
8500#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8501#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8502#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8503#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8504#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8505#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8506#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8507
8508#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8509#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8510 do { \
8511 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8512 return iemRaiseDeviceNotAvailable(pIemCpu); \
8513 } while (0)
8514#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8515 do { \
8516 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8517 return iemRaiseMathFault(pIemCpu); \
8518 } while (0)
8519#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8520 do { \
8521 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8522 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8523 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8524 return iemRaiseUndefinedOpcode(pIemCpu); \
8525 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8526 return iemRaiseDeviceNotAvailable(pIemCpu); \
8527 } while (0)
8528#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8529 do { \
8530 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8531 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8532 return iemRaiseUndefinedOpcode(pIemCpu); \
8533 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8534 return iemRaiseDeviceNotAvailable(pIemCpu); \
8535 } while (0)
8536#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8537 do { \
8538 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8539 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8540 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8541 return iemRaiseUndefinedOpcode(pIemCpu); \
8542 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8543 return iemRaiseDeviceNotAvailable(pIemCpu); \
8544 } while (0)
8545#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8546 do { \
8547 if (pIemCpu->uCpl != 0) \
8548 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8549 } while (0)
8550
8551
8552#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8553#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8554#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8555#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8556#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8557#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8558#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8559 uint32_t a_Name; \
8560 uint32_t *a_pName = &a_Name
8561#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8562 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8563
8564#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8565#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8566
8567#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8568#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8569#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8570#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8571#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8572#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8573#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8574#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8575#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8576#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8577#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8578#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8579#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8580#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8581#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8582#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8583#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8584#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8585#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8586#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8587#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8588#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8589#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8590#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8591#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8592#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8593#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8594#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8595#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8596/** @note Not for IOPL or IF testing or modification. */
8597#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8598#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8599#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8600#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8601
8602#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8603#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8604#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8605#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8606#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8607#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8608#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8609#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8610#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8611#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8612#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8613 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8614
8615#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8616#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8617/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8618 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8619#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8620#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8621/** @note Not for IOPL or IF testing or modification. */
8622#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8623
8624#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8625#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8626#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8627 do { \
8628 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8629 *pu32Reg += (a_u32Value); \
8630 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8631 } while (0)
8632#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8633
8634#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8635#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8636#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8637 do { \
8638 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8639 *pu32Reg -= (a_u32Value); \
8640 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8641 } while (0)
8642#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8643#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
8644
8645#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8646#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8647#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8648#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8649#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8650#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8651#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8652
8653#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8654#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8655#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8656#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8657
8658#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8659#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8660#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8661
8662#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8663#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
8664#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8665
8666#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8667#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8668#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8669
8670#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8671#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8672#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8673
8674#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8675
8676#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8677
8678#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8679#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8680#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8681 do { \
8682 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8683 *pu32Reg &= (a_u32Value); \
8684 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8685 } while (0)
8686#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8687
8688#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8689#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8690#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8691 do { \
8692 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8693 *pu32Reg |= (a_u32Value); \
8694 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8695 } while (0)
8696#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8697
8698
8699/** @note Not for IOPL or IF modification. */
8700#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8701/** @note Not for IOPL or IF modification. */
8702#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8703/** @note Not for IOPL or IF modification. */
8704#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8705
8706#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8707
8708
8709#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8710 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8711#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8712 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8713#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8714 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8715#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8716 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8717#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8718 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8719#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8720 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8721#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8722 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8723
8724#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8725 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8726#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8727 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8728#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8729 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8730#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8731 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8732#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8733 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8734 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8735 } while (0)
8736#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8737 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8738 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8739 } while (0)
8740#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8741 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8742#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8743 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8744#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8745 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8746
8747#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8749#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8751#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8753
8754#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8755 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8756#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8758#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8760
8761#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8762 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8763#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8764 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8765#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8767
8768#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8769 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8770
8771#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8773#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8775#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8776 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8777#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8778 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8779
8780#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8781 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8782#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8783 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8784#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8785 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8786
8787#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8788 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8789#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8790 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8791
8792
8793
8794#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8795 do { \
8796 uint8_t u8Tmp; \
8797 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8798 (a_u16Dst) = u8Tmp; \
8799 } while (0)
8800#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8801 do { \
8802 uint8_t u8Tmp; \
8803 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8804 (a_u32Dst) = u8Tmp; \
8805 } while (0)
8806#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8807 do { \
8808 uint8_t u8Tmp; \
8809 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8810 (a_u64Dst) = u8Tmp; \
8811 } while (0)
8812#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8813 do { \
8814 uint16_t u16Tmp; \
8815 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8816 (a_u32Dst) = u16Tmp; \
8817 } while (0)
8818#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8819 do { \
8820 uint16_t u16Tmp; \
8821 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8822 (a_u64Dst) = u16Tmp; \
8823 } while (0)
8824#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8825 do { \
8826 uint32_t u32Tmp; \
8827 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8828 (a_u64Dst) = u32Tmp; \
8829 } while (0)
8830
8831#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8832 do { \
8833 uint8_t u8Tmp; \
8834 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8835 (a_u16Dst) = (int8_t)u8Tmp; \
8836 } while (0)
8837#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8838 do { \
8839 uint8_t u8Tmp; \
8840 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8841 (a_u32Dst) = (int8_t)u8Tmp; \
8842 } while (0)
8843#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8844 do { \
8845 uint8_t u8Tmp; \
8846 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8847 (a_u64Dst) = (int8_t)u8Tmp; \
8848 } while (0)
8849#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8850 do { \
8851 uint16_t u16Tmp; \
8852 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8853 (a_u32Dst) = (int16_t)u16Tmp; \
8854 } while (0)
8855#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8856 do { \
8857 uint16_t u16Tmp; \
8858 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8859 (a_u64Dst) = (int16_t)u16Tmp; \
8860 } while (0)
8861#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8862 do { \
8863 uint32_t u32Tmp; \
8864 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8865 (a_u64Dst) = (int32_t)u32Tmp; \
8866 } while (0)
8867
8868#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8869 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8870#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8871 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8872#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8873 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8874#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8875 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8876
8877#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8878 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8879#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8880 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8881#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8882 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8883#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8884 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8885
8886#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8887#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8888#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8889#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8890#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8891#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8892#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8893 do { \
8894 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8895 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8896 } while (0)
8897
8898#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8899 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8900#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8901 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8902
8903
8904#define IEM_MC_PUSH_U16(a_u16Value) \
8905 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8906#define IEM_MC_PUSH_U32(a_u32Value) \
8907 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8908#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8909 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8910#define IEM_MC_PUSH_U64(a_u64Value) \
8911 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8912
8913#define IEM_MC_POP_U16(a_pu16Value) \
8914 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8915#define IEM_MC_POP_U32(a_pu32Value) \
8916 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8917#define IEM_MC_POP_U64(a_pu64Value) \
8918 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8919
8920/** Maps guest memory for direct or bounce buffered access.
8921 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8922 * @remarks May return.
8923 */
8924#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8925 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8926
8927/** Maps guest memory for direct or bounce buffered access.
8928 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8929 * @remarks May return.
8930 */
8931#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8932 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8933
8934/** Commits the memory and unmaps the guest memory.
8935 * @remarks May return.
8936 */
8937#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8938 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8939
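/*
 * Illustrative sketch (not taken from this file) of how the map/commit pair above is
 * used for a read-modify-write memory operand; IEM_ACCESS_DATA_RW, iEffSeg and the
 * omitted AIMPL worker call are assumptions made for the example.
 *
 *     IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *     // ... operate on *pu16Dst via an AIMPL worker ...
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */
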
8940/** Commits the memory and unmaps the guest memory unless the FPU status word
8941 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
8942 * that would prevent the FPU store from happening.
8943 *
8944 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8945 * store, while \#P will not.
8946 *
8947 * @remarks May in theory return - for now.
8948 */
8949#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8950 do { \
8951 if ( !(a_u16FSW & X86_FSW_ES) \
8952 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8953 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8954 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8955 } while (0)
8956
8957/** Calculate effective address from R/M. */
8958#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8959 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8960
8961#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8962#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8963#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8964#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8965#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8966#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8967#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8968
8969/**
8970 * Defers the rest of the instruction emulation to a C implementation routine
8971 * and returns, only taking the standard parameters.
8972 *
8973 * @param a_pfnCImpl The pointer to the C routine.
8974 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8975 */
8976#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8977
8978/**
8979 * Defers the rest of instruction emulation to a C implementation routine and
8980 * returns, taking one argument in addition to the standard ones.
8981 *
8982 * @param a_pfnCImpl The pointer to the C routine.
8983 * @param a0 The argument.
8984 */
8985#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8986
8987/**
8988 * Defers the rest of the instruction emulation to a C implementation routine
8989 * and returns, taking two arguments in addition to the standard ones.
8990 *
8991 * @param a_pfnCImpl The pointer to the C routine.
8992 * @param a0 The first extra argument.
8993 * @param a1 The second extra argument.
8994 */
8995#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8996
8997/**
8998 * Defers the rest of the instruction emulation to a C implementation routine
8999 * and returns, taking three arguments in addition to the standard ones.
9000 *
9001 * @param a_pfnCImpl The pointer to the C routine.
9002 * @param a0 The first extra argument.
9003 * @param a1 The second extra argument.
9004 * @param a2 The third extra argument.
9005 */
9006#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9007
9008/**
9009 * Defers the rest of the instruction emulation to a C implementation routine
9010 * and returns, taking four arguments in addition to the standard ones.
9011 *
9012 * @param a_pfnCImpl The pointer to the C routine.
9013 * @param a0 The first extra argument.
9014 * @param a1 The second extra argument.
9015 * @param a2 The third extra argument.
9016 * @param a3 The fourth extra argument.
9017 */
9018#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
9019
9020/**
9021 * Defers the rest of the instruction emulation to a C implementation routine
9022 * and returns, taking five arguments in addition to the standard ones.
9023 *
9024 * @param a_pfnCImpl The pointer to the C routine.
9025 * @param a0 The first extra argument.
9026 * @param a1 The second extra argument.
9027 * @param a2 The third extra argument.
9028 * @param a3 The fourth extra argument.
9029 * @param a4 The fifth extra argument.
9030 */
9031#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
9032
9033/**
9034 * Defers the entire instruction emulation to a C implementation routine and
9035 * returns, only taking the standard parameters.
9036 *
9037 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9038 *
9039 * @param a_pfnCImpl The pointer to the C routine.
9040 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
9041 */
9042#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
9043
9044/**
9045 * Defers the entire instruction emulation to a C implementation routine and
9046 * returns, taking one argument in addition to the standard ones.
9047 *
9048 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9049 *
9050 * @param a_pfnCImpl The pointer to the C routine.
9051 * @param a0 The argument.
9052 */
9053#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
9054
9055/**
9056 * Defers the entire instruction emulation to a C implementation routine and
9057 * returns, taking two arguments in addition to the standard ones.
9058 *
9059 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9060 *
9061 * @param a_pfnCImpl The pointer to the C routine.
9062 * @param a0 The first extra argument.
9063 * @param a1 The second extra argument.
9064 */
9065#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
9066
9067/**
9068 * Defers the entire instruction emulation to a C implementation routine and
9069 * returns, taking three arguments in addition to the standard ones.
9070 *
9071 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9072 *
9073 * @param a_pfnCImpl The pointer to the C routine.
9074 * @param a0 The first extra argument.
9075 * @param a1 The second extra argument.
9076 * @param a2 The third extra argument.
9077 */
9078#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9079
9080/**
9081 * Calls a FPU assembly implementation taking one visible argument.
9082 *
9083 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9084 * @param a0 The first extra argument.
9085 */
9086#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
9087 do { \
9088 iemFpuPrepareUsage(pIemCpu); \
9089 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9090 } while (0)
9091
9092/**
9093 * Calls a FPU assembly implementation taking two visible arguments.
9094 *
9095 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9096 * @param a0 The first extra argument.
9097 * @param a1 The second extra argument.
9098 */
9099#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9100 do { \
9101 iemFpuPrepareUsage(pIemCpu); \
9102 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9103 } while (0)
9104
9105/**
9106 * Calls a FPU assembly implementation taking three visible arguments.
9107 *
9108 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9109 * @param a0 The first extra argument.
9110 * @param a1 The second extra argument.
9111 * @param a2 The third extra argument.
9112 */
9113#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9114 do { \
9115 iemFpuPrepareUsage(pIemCpu); \
9116 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9117 } while (0)
9118
9119#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9120 do { \
9121 (a_FpuData).FSW = (a_FSW); \
9122 (a_FpuData).r80Result = *(a_pr80Value); \
9123 } while (0)
9124
9125/** Pushes FPU result onto the stack. */
9126#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9127 iemFpuPushResult(pIemCpu, &a_FpuData)
9128/** Pushes FPU result onto the stack and sets the FPUDP. */
9129#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9130 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9131
9132/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
9133#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9134 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9135
9136/** Stores FPU result in a stack register. */
9137#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9138 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9139/** Stores FPU result in a stack register and pops the stack. */
9140#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9141 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9142/** Stores FPU result in a stack register and sets the FPUDP. */
9143#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9144 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9145/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9146 * stack. */
9147#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9148 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9149
9150/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9151#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9152 iemFpuUpdateOpcodeAndIp(pIemCpu)
9153/** Free a stack register (for FFREE and FFREEP). */
9154#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9155 iemFpuStackFree(pIemCpu, a_iStReg)
9156/** Increment the FPU stack pointer. */
9157#define IEM_MC_FPU_STACK_INC_TOP() \
9158 iemFpuStackIncTop(pIemCpu)
9159/** Decrement the FPU stack pointer. */
9160#define IEM_MC_FPU_STACK_DEC_TOP() \
9161 iemFpuStackDecTop(pIemCpu)
9162
9163/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9164#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9165 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9166/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9167#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9168 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9169/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9170#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9171 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9172/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9173#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9174 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9175/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9176 * stack. */
9177#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9178 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9179/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9180#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9181 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9182
9183/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9184#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9185 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9186/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9187 * stack. */
9188#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9189 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9190/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9191 * FPUDS. */
9192#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9193 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9194/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9195 * FPUDS. Pops stack. */
9196#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9197 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9198/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9199 * stack twice. */
9200#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9201 iemFpuStackUnderflowThenPopPop(pIemCpu)
9202/** Raises a FPU stack underflow exception for an instruction pushing a result
9203 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9204#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9205 iemFpuStackPushUnderflow(pIemCpu)
9206/** Raises a FPU stack underflow exception for an instruction pushing a result
9207 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9208#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9209 iemFpuStackPushUnderflowTwo(pIemCpu)
9210
9211/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9212 * FPUIP, FPUCS and FOP. */
9213#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9214 iemFpuStackPushOverflow(pIemCpu)
9215/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9216 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9217#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9218 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9219/** Indicates that we (might) have modified the FPU state. */
9220#define IEM_MC_USED_FPU() \
9221 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9222
9223/**
9224 * Calls a MMX assembly implementation taking two visible arguments.
9225 *
9226 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9227 * @param a0 The first extra argument.
9228 * @param a1 The second extra argument.
9229 */
9230#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9231 do { \
9232 iemFpuPrepareUsage(pIemCpu); \
9233 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9234 } while (0)
9235
9236/**
9237 * Calls a MMX assembly implementation taking three visible arguments.
9238 *
9239 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9240 * @param a0 The first extra argument.
9241 * @param a1 The second extra argument.
9242 * @param a2 The third extra argument.
9243 */
9244#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9245 do { \
9246 iemFpuPrepareUsage(pIemCpu); \
9247 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9248 } while (0)
9249
9250
9251/**
9252 * Calls a SSE assembly implementation taking two visible arguments.
9253 *
9254 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9255 * @param a0 The first extra argument.
9256 * @param a1 The second extra argument.
9257 */
9258#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9259 do { \
9260 iemFpuPrepareUsageSse(pIemCpu); \
9261 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9262 } while (0)
9263
9264/**
9265 * Calls a SSE assembly implementation taking three visible arguments.
9266 *
9267 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9268 * @param a0 The first extra argument.
9269 * @param a1 The second extra argument.
9270 * @param a2 The third extra argument.
9271 */
9272#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9273 do { \
9274 iemFpuPrepareUsageSse(pIemCpu); \
9275 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9276 } while (0)
9277
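/*
 * Illustrative sketch, not part of the original file: the a_pfnAImpl worker
 * handed to the MMX/SSE call macros above receives the shared x87/XMM state
 * as an implicit first argument.  A two-operand MMX worker prototype would
 * look roughly like this (the pxor worker is used as an assumed example):
 *
 *     IEM_DECL_IMPL_DEF(void, iemAImpl_pxor_u64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
 *
 * and an instruction body would then dispatch to it with
 *
 *     IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_pxor_u64, pu64Dst, pu64Src);
 *
 * The macro calls iemFpuPrepareUsage() / iemFpuPrepareUsageSse() first, so
 * the worker can assume the guest FPU state is ready for use.
 */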
9278
9279/** @note Not for IOPL or IF testing. */
9280#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9281/** @note Not for IOPL or IF testing. */
9282#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9283/** @note Not for IOPL or IF testing. */
9284#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9285/** @note Not for IOPL or IF testing. */
9286#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9287/** @note Not for IOPL or IF testing. */
9288#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9289 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9290 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9291/** @note Not for IOPL or IF testing. */
9292#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9293 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9294 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9295/** @note Not for IOPL or IF testing. */
9296#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9297 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9298 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9299 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9300/** @note Not for IOPL or IF testing. */
9301#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9302 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9303 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9304 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9305#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9306#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9307#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9308/** @note Not for IOPL or IF testing. */
9309#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9310 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9311 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9312/** @note Not for IOPL or IF testing. */
9313#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9314 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9315 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9316/** @note Not for IOPL or IF testing. */
9317#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9318 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9319 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9320/** @note Not for IOPL or IF testing. */
9321#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9322 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9323 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9324/** @note Not for IOPL or IF testing. */
9325#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9326 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9327 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9328/** @note Not for IOPL or IF testing. */
9329#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9330 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9331 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9332#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9333#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9334#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9335 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9336#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9337 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9338#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9339 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9340#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9341 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9342#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9343 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9344#define IEM_MC_IF_FCW_IM() \
9345 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9346
9347#define IEM_MC_ELSE() } else {
9348#define IEM_MC_ENDIF() } do {} while (0)
9349
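/*
 * Illustrative sketch, not part of the original file: the IEM_MC_IF_* macros
 * above open a plain C block, IEM_MC_ELSE() supplies the "} else {" part and
 * IEM_MC_ENDIF() closes it again.  A CMOVcc-style body therefore reads
 * roughly like this (register and value names are assumed):
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_STORE_GREG_U16(iReg, u16Value);
 *     IEM_MC_ENDIF();
 *     IEM_MC_ADVANCE_RIP();
 */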
9350/** @} */
9351
9352
9353/** @name Opcode Debug Helpers.
9354 * @{
9355 */
9356#ifdef DEBUG
9357# define IEMOP_MNEMONIC(a_szMnemonic) \
9358 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9359 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9360# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9361 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9362 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9363#else
9364# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9365# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9366#endif
9367
9368/** @} */
9369
9370
9371/** @name Opcode Helpers.
9372 * @{
9373 */
9374
9375#ifdef IN_RING3
9376# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9377 do { \
9378 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9379 else \
9380 { \
9381 DBGFSTOP(IEMCPU_TO_VM(pIemCpu)); \
9382 return IEMOP_RAISE_INVALID_OPCODE(); \
9383 } \
9384 } while (0)
9385#else
9386# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9387 do { \
9388 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9389 else return IEMOP_RAISE_INVALID_OPCODE(); \
9390 } while (0)
9391#endif
9392
9393/** The instruction requires a 186 or later. */
9394#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
9395# define IEMOP_HLP_MIN_186() do { } while (0)
9396#else
9397# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
9398#endif
9399
9400/** The instruction requires a 286 or later. */
9401#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
9402# define IEMOP_HLP_MIN_286() do { } while (0)
9403#else
9404# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
9405#endif
9406
9407/** The instruction requires a 386 or later. */
9408#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9409# define IEMOP_HLP_MIN_386() do { } while (0)
9410#else
9411# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
9412#endif
9413
9414/** The instruction requires a 386 or later if the given expression is true. */
9415#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9416# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
9417#else
9418# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
9419#endif
9420
9421/** The instruction requires a 486 or later. */
9422#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
9423# define IEMOP_HLP_MIN_486() do { } while (0)
9424#else
9425# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
9426#endif
9427
9428/** The instruction requires a Pentium (586) or later. */
9429#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
9430# define IEMOP_HLP_MIN_586() do { } while (0)
9431#else
9432# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
9433#endif
9434
9435/** The instruction requires a PentiumPro (686) or later. */
9436#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
9437# define IEMOP_HLP_MIN_686() do { } while (0)
9438#else
9439# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
9440#endif
9441
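/*
 * Illustrative sketch, not part of the original file: a decoder function
 * would typically gate itself on the minimum CPU generation right after
 * logging the mnemonic (the function and mnemonic are hypothetical):
 *
 *     FNIEMOP_DEF(iemOp_example)
 *     {
 *         IEMOP_MNEMONIC("example");
 *         IEMOP_HLP_MIN_386();
 *         ...
 *     }
 *
 * When IEM_CFG_TARGET_CPU already guarantees the required generation at
 * compile time, the helper expands to an empty statement and costs nothing.
 */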
9442
9443/** The instruction raises an \#UD in real and V8086 mode. */
9444#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9445 do \
9446 { \
9447 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9448 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9449 } while (0)
9450
9451/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9452 * lock prefixed.
9453 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9454#define IEMOP_HLP_NO_LOCK_PREFIX() \
9455 do \
9456 { \
9457 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9458 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9459 } while (0)
9460
9461/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9462 * 64-bit mode. */
9463#define IEMOP_HLP_NO_64BIT() \
9464 do \
9465 { \
9466 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9467 return IEMOP_RAISE_INVALID_OPCODE(); \
9468 } while (0)
9469
9470/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9471 * 64-bit mode. */
9472#define IEMOP_HLP_ONLY_64BIT() \
9473 do \
9474 { \
9475 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9476 return IEMOP_RAISE_INVALID_OPCODE(); \
9477 } while (0)
9478
9479/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9480#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9481 do \
9482 { \
9483 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9484 iemRecalEffOpSize64Default(pIemCpu); \
9485 } while (0)
9486
9487/** The instruction has 64-bit operand size if 64-bit mode. */
9488#define IEMOP_HLP_64BIT_OP_SIZE() \
9489 do \
9490 { \
9491 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9492 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9493 } while (0)
9494
9495/** Only a REX prefix immediately preceding the first opcode byte takes
9496 * effect. This macro helps ensure this as well as logging bad guest code. */
9497#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9498 do \
9499 { \
9500 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9501 { \
9502 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9503 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9504 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9505 pIemCpu->uRexB = 0; \
9506 pIemCpu->uRexIndex = 0; \
9507 pIemCpu->uRexReg = 0; \
9508 iemRecalEffOpSize(pIemCpu); \
9509 } \
9510 } while (0)
9511
9512/**
9513 * Done decoding.
9514 */
9515#define IEMOP_HLP_DONE_DECODING() \
9516 do \
9517 { \
9518 /*nothing for now, maybe later... */ \
9519 } while (0)
9520
9521/**
9522 * Done decoding, raise \#UD exception if lock prefix present.
9523 */
9524#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9525 do \
9526 { \
9527 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9528 { /* likely */ } \
9529 else \
9530 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9531 } while (0)
9532#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9533 do \
9534 { \
9535 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9536 { /* likely */ } \
9537 else \
9538 { \
9539 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9540 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9541 } \
9542 } while (0)
9543#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9544 do \
9545 { \
9546 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9547 { /* likely */ } \
9548 else \
9549 { \
9550 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9551 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9552 } \
9553 } while (0)
9554/**
9555 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9556 * are present.
9557 */
9558#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9559 do \
9560 { \
9561 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9562 { /* likely */ } \
9563 else \
9564 return IEMOP_RAISE_INVALID_OPCODE(); \
9565 } while (0)
9566
9567
9568/**
9569 * Calculates the effective address of a ModR/M memory operand.
9570 *
9571 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9572 *
9573 * @return Strict VBox status code.
9574 * @param pIemCpu The IEM per CPU data.
9575 * @param bRm The ModRM byte.
9576 * @param cbImm The size of any immediate following the
9577 * effective address opcode bytes. Important for
9578 * RIP relative addressing.
9579 * @param pGCPtrEff Where to return the effective address.
9580 */
9581IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9582{
9583 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9584 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9585#define SET_SS_DEF() \
9586 do \
9587 { \
9588 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9589 pIemCpu->iEffSeg = X86_SREG_SS; \
9590 } while (0)
9591
9592 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9593 {
9594/** @todo Check the effective address size crap! */
9595 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9596 {
9597 uint16_t u16EffAddr;
9598
9599 /* Handle the disp16 form with no registers first. */
9600 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9601 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9602 else
9603 {
9604 /* Get the displacement. */
9605 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9606 {
9607 case 0: u16EffAddr = 0; break;
9608 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9609 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9610 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9611 }
9612
9613 /* Add the base and index registers to the disp. */
9614 switch (bRm & X86_MODRM_RM_MASK)
9615 {
9616 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9617 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9618 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9619 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9620 case 4: u16EffAddr += pCtx->si; break;
9621 case 5: u16EffAddr += pCtx->di; break;
9622 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9623 case 7: u16EffAddr += pCtx->bx; break;
9624 }
9625 }
9626
9627 *pGCPtrEff = u16EffAddr;
9628 }
9629 else
9630 {
9631 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9632 uint32_t u32EffAddr;
9633
9634 /* Handle the disp32 form with no registers first. */
9635 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9636 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9637 else
9638 {
9639 /* Get the register (or SIB) value. */
9640 switch ((bRm & X86_MODRM_RM_MASK))
9641 {
9642 case 0: u32EffAddr = pCtx->eax; break;
9643 case 1: u32EffAddr = pCtx->ecx; break;
9644 case 2: u32EffAddr = pCtx->edx; break;
9645 case 3: u32EffAddr = pCtx->ebx; break;
9646 case 4: /* SIB */
9647 {
9648 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9649
9650 /* Get the index and scale it. */
9651 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9652 {
9653 case 0: u32EffAddr = pCtx->eax; break;
9654 case 1: u32EffAddr = pCtx->ecx; break;
9655 case 2: u32EffAddr = pCtx->edx; break;
9656 case 3: u32EffAddr = pCtx->ebx; break;
9657 case 4: u32EffAddr = 0; /*none */ break;
9658 case 5: u32EffAddr = pCtx->ebp; break;
9659 case 6: u32EffAddr = pCtx->esi; break;
9660 case 7: u32EffAddr = pCtx->edi; break;
9661 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9662 }
9663 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9664
9665 /* add base */
9666 switch (bSib & X86_SIB_BASE_MASK)
9667 {
9668 case 0: u32EffAddr += pCtx->eax; break;
9669 case 1: u32EffAddr += pCtx->ecx; break;
9670 case 2: u32EffAddr += pCtx->edx; break;
9671 case 3: u32EffAddr += pCtx->ebx; break;
9672 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9673 case 5:
9674 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9675 {
9676 u32EffAddr += pCtx->ebp;
9677 SET_SS_DEF();
9678 }
9679 else
9680 {
9681 uint32_t u32Disp;
9682 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9683 u32EffAddr += u32Disp;
9684 }
9685 break;
9686 case 6: u32EffAddr += pCtx->esi; break;
9687 case 7: u32EffAddr += pCtx->edi; break;
9688 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9689 }
9690 break;
9691 }
9692 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9693 case 6: u32EffAddr = pCtx->esi; break;
9694 case 7: u32EffAddr = pCtx->edi; break;
9695 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9696 }
9697
9698 /* Get and add the displacement. */
9699 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9700 {
9701 case 0:
9702 break;
9703 case 1:
9704 {
9705 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9706 u32EffAddr += i8Disp;
9707 break;
9708 }
9709 case 2:
9710 {
9711 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9712 u32EffAddr += u32Disp;
9713 break;
9714 }
9715 default:
9716 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9717 }
9718
9719 }
9720 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9721 *pGCPtrEff = u32EffAddr;
9722 else
9723 {
9724 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9725 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9726 }
9727 }
9728 }
9729 else
9730 {
9731 uint64_t u64EffAddr;
9732
9733 /* Handle the rip+disp32 form with no registers first. */
9734 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9735 {
9736 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9737 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9738 }
9739 else
9740 {
9741 /* Get the register (or SIB) value. */
9742 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9743 {
9744 case 0: u64EffAddr = pCtx->rax; break;
9745 case 1: u64EffAddr = pCtx->rcx; break;
9746 case 2: u64EffAddr = pCtx->rdx; break;
9747 case 3: u64EffAddr = pCtx->rbx; break;
9748 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9749 case 6: u64EffAddr = pCtx->rsi; break;
9750 case 7: u64EffAddr = pCtx->rdi; break;
9751 case 8: u64EffAddr = pCtx->r8; break;
9752 case 9: u64EffAddr = pCtx->r9; break;
9753 case 10: u64EffAddr = pCtx->r10; break;
9754 case 11: u64EffAddr = pCtx->r11; break;
9755 case 13: u64EffAddr = pCtx->r13; break;
9756 case 14: u64EffAddr = pCtx->r14; break;
9757 case 15: u64EffAddr = pCtx->r15; break;
9758 /* SIB */
9759 case 4:
9760 case 12:
9761 {
9762 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9763
9764 /* Get the index and scale it. */
9765 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9766 {
9767 case 0: u64EffAddr = pCtx->rax; break;
9768 case 1: u64EffAddr = pCtx->rcx; break;
9769 case 2: u64EffAddr = pCtx->rdx; break;
9770 case 3: u64EffAddr = pCtx->rbx; break;
9771 case 4: u64EffAddr = 0; /*none */ break;
9772 case 5: u64EffAddr = pCtx->rbp; break;
9773 case 6: u64EffAddr = pCtx->rsi; break;
9774 case 7: u64EffAddr = pCtx->rdi; break;
9775 case 8: u64EffAddr = pCtx->r8; break;
9776 case 9: u64EffAddr = pCtx->r9; break;
9777 case 10: u64EffAddr = pCtx->r10; break;
9778 case 11: u64EffAddr = pCtx->r11; break;
9779 case 12: u64EffAddr = pCtx->r12; break;
9780 case 13: u64EffAddr = pCtx->r13; break;
9781 case 14: u64EffAddr = pCtx->r14; break;
9782 case 15: u64EffAddr = pCtx->r15; break;
9783 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9784 }
9785 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9786
9787 /* add base */
9788 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9789 {
9790 case 0: u64EffAddr += pCtx->rax; break;
9791 case 1: u64EffAddr += pCtx->rcx; break;
9792 case 2: u64EffAddr += pCtx->rdx; break;
9793 case 3: u64EffAddr += pCtx->rbx; break;
9794 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9795 case 6: u64EffAddr += pCtx->rsi; break;
9796 case 7: u64EffAddr += pCtx->rdi; break;
9797 case 8: u64EffAddr += pCtx->r8; break;
9798 case 9: u64EffAddr += pCtx->r9; break;
9799 case 10: u64EffAddr += pCtx->r10; break;
9800 case 11: u64EffAddr += pCtx->r11; break;
9801 case 12: u64EffAddr += pCtx->r12; break;
9802 case 14: u64EffAddr += pCtx->r14; break;
9803 case 15: u64EffAddr += pCtx->r15; break;
9804 /* complicated encodings */
9805 case 5:
9806 case 13:
9807 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9808 {
9809 if (!pIemCpu->uRexB)
9810 {
9811 u64EffAddr += pCtx->rbp;
9812 SET_SS_DEF();
9813 }
9814 else
9815 u64EffAddr += pCtx->r13;
9816 }
9817 else
9818 {
9819 uint32_t u32Disp;
9820 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9821 u64EffAddr += (int32_t)u32Disp;
9822 }
9823 break;
9824 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9825 }
9826 break;
9827 }
9828 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9829 }
9830
9831 /* Get and add the displacement. */
9832 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9833 {
9834 case 0:
9835 break;
9836 case 1:
9837 {
9838 int8_t i8Disp;
9839 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9840 u64EffAddr += i8Disp;
9841 break;
9842 }
9843 case 2:
9844 {
9845 uint32_t u32Disp;
9846 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9847 u64EffAddr += (int32_t)u32Disp;
9848 break;
9849 }
9850 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9851 }
9852
9853 }
9854
9855 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9856 *pGCPtrEff = u64EffAddr;
9857 else
9858 {
9859 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9860 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9861 }
9862 }
9863
9864 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9865 return VINF_SUCCESS;
9866}
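/*
 * Worked example, added for illustration and not part of the original file:
 * with 16-bit addressing, bRm = 0x46 decodes as mod=1, reg=0, r/m=6, i.e.
 * [bp+disp8].  The code above fetches one sign-extended displacement byte,
 * adds pCtx->bp and, because BP is involved, applies the SS segment default
 * via SET_SS_DEF() unless a segment prefix overrides it.  With disp8=0x10
 * and BP=0x2000 the returned *pGCPtrEff is 0x2010.
 */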
9867
9868/** @} */
9869
9870
9871
9872/*
9873 * Include the instructions
9874 */
9875#include "IEMAllInstructions.cpp.h"
9876
9877
9878
9879
9880#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9881
9882/**
9883 * Sets up execution verification mode.
9884 */
9885IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9886{
9887 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9888 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9889
9890 /*
9891 * Always note down the address of the current instruction.
9892 */
9893 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9894 pIemCpu->uOldRip = pOrgCtx->rip;
9895
9896 /*
9897 * Enable verification and/or logging.
9898 */
9899 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */;
9900 if ( fNewNoRem
9901 && ( 0
9902#if 0 /* auto enable on first paged protected mode interrupt */
9903 || ( pOrgCtx->eflags.Bits.u1IF
9904 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9905 && TRPMHasTrap(pVCpu)
9906 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9907#endif
9908#if 0
9909 || ( pOrgCtx->cs == 0x10
9910 && ( pOrgCtx->rip == 0x90119e3e
9911 || pOrgCtx->rip == 0x901d9810))
9912#endif
9913#if 0 /* Auto enable DSL - FPU stuff. */
9914 || ( pOrgCtx->cs == 0x10
9915 && (// pOrgCtx->rip == 0xc02ec07f
9916 //|| pOrgCtx->rip == 0xc02ec082
9917 //|| pOrgCtx->rip == 0xc02ec0c9
9918 0
9919 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9920#endif
9921#if 0 /* Auto enable DSL - fstp st0 stuff. */
9922 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9923#endif
9924#if 0
9925 || pOrgCtx->rip == 0x9022bb3a
9926#endif
9927#if 0
9928 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9929#endif
9930#if 0
9931 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9932 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9933#endif
9934#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9935 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9936 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9937 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9938#endif
9939#if 0 /* NT4SP1 - xadd early boot. */
9940 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9941#endif
9942#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9943 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9944#endif
9945#if 0 /* NT4SP1 - cmpxchg (AMD). */
9946 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9947#endif
9948#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9949 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9950#endif
9951#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9952 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9953
9954#endif
9955#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9956 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9957
9958#endif
9959#if 0 /* NT4SP1 - frstor [ecx] */
9960 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9961#endif
9962#if 0 /* xxxxxx - All long mode code. */
9963 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9964#endif
9965#if 0 /* rep movsq linux 3.7 64-bit boot. */
9966 || (pOrgCtx->rip == 0x0000000000100241)
9967#endif
9968#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9969 || (pOrgCtx->rip == 0x000000000215e240)
9970#endif
9971#if 0 /* DOS's size-overridden iret to v8086. */
9972 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9973#endif
9974 )
9975 )
9976 {
9977 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9978 RTLogFlags(NULL, "enabled");
9979 fNewNoRem = false;
9980 }
9981 if (fNewNoRem != pIemCpu->fNoRem)
9982 {
9983 pIemCpu->fNoRem = fNewNoRem;
9984 if (!fNewNoRem)
9985 {
9986 LogAlways(("Enabling verification mode!\n"));
9987 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9988 }
9989 else
9990 LogAlways(("Disabling verification mode!\n"));
9991 }
9992
9993 /*
9994 * Switch state.
9995 */
9996 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9997 {
9998 static CPUMCTX s_DebugCtx; /* Ugly! */
9999
10000 s_DebugCtx = *pOrgCtx;
10001 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
10002 }
10003
10004 /*
10005 * See if there is an interrupt pending in TRPM and inject it if we can.
10006 */
10007 pIemCpu->uInjectCpl = UINT8_MAX;
10008 if ( pOrgCtx->eflags.Bits.u1IF
10009 && TRPMHasTrap(pVCpu)
10010 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
10011 {
10012 uint8_t u8TrapNo;
10013 TRPMEVENT enmType;
10014 RTGCUINT uErrCode;
10015 RTGCPTR uCr2;
10016 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
10017 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10018 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10019 TRPMResetTrap(pVCpu);
10020 pIemCpu->uInjectCpl = pIemCpu->uCpl;
10021 }
10022
10023 /*
10024 * Reset the counters.
10025 */
10026 pIemCpu->cIOReads = 0;
10027 pIemCpu->cIOWrites = 0;
10028 pIemCpu->fIgnoreRaxRdx = false;
10029 pIemCpu->fOverlappingMovs = false;
10030 pIemCpu->fProblematicMemory = false;
10031 pIemCpu->fUndefinedEFlags = 0;
10032
10033 if (IEM_VERIFICATION_ENABLED(pIemCpu))
10034 {
10035 /*
10036 * Free all verification records.
10037 */
10038 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
10039 pIemCpu->pIemEvtRecHead = NULL;
10040 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
10041 do
10042 {
10043 while (pEvtRec)
10044 {
10045 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
10046 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
10047 pIemCpu->pFreeEvtRec = pEvtRec;
10048 pEvtRec = pNext;
10049 }
10050 pEvtRec = pIemCpu->pOtherEvtRecHead;
10051 pIemCpu->pOtherEvtRecHead = NULL;
10052 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
10053 } while (pEvtRec);
10054 }
10055}
10056
10057
10058/**
10059 * Allocate an event record.
10060 * @returns Pointer to a record.
10061 */
10062IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
10063{
10064 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10065 return NULL;
10066
10067 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
10068 if (pEvtRec)
10069 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
10070 else
10071 {
10072 if (!pIemCpu->ppIemEvtRecNext)
10073 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
10074
10075 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
10076 if (!pEvtRec)
10077 return NULL;
10078 }
10079 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
10080 pEvtRec->pNext = NULL;
10081 return pEvtRec;
10082}
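/*
 * Note, added for illustration and not part of the original file: records
 * are recycled rather than freed.  iemExecVerificationModeSetup() pushes all
 * records from the previous instruction back onto pFreeEvtRec, so in the
 * steady state this function is just a list pop:
 *
 *     PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
 *     if (pEvtRec)
 *         pIemCpu->pFreeEvtRec = pEvtRec->pNext;
 *
 * and only falls back to MMR3HeapAlloc() until the pool has warmed up.
 */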
10083
10084
10085/**
10086 * IOMMMIORead notification.
10087 */
10088VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
10089{
10090 PVMCPU pVCpu = VMMGetCpu(pVM);
10091 if (!pVCpu)
10092 return;
10093 PIEMCPU pIemCpu = &pVCpu->iem.s;
10094 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10095 if (!pEvtRec)
10096 return;
10097 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
10098 pEvtRec->u.RamRead.GCPhys = GCPhys;
10099 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
10100 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10101 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10102}
10103
10104
10105/**
10106 * IOMMMIOWrite notification.
10107 */
10108VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
10109{
10110 PVMCPU pVCpu = VMMGetCpu(pVM);
10111 if (!pVCpu)
10112 return;
10113 PIEMCPU pIemCpu = &pVCpu->iem.s;
10114 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10115 if (!pEvtRec)
10116 return;
10117 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
10118 pEvtRec->u.RamWrite.GCPhys = GCPhys;
10119 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
10120 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
10121 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
10122 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
10123 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
10124 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10125 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10126}
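/*
 * Worked example, added for illustration and not part of the original file:
 * for u32Value = 0x12345678 the RT_BYTE1..RT_BYTE4 macros above store the
 * bytes in little-endian order, i.e. ab[0]=0x78, ab[1]=0x56, ab[2]=0x34 and
 * ab[3]=0x12, matching the byte layout the write would have in guest memory.
 */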
10127
10128
10129/**
10130 * IOMIOPortRead notification.
10131 */
10132VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
10133{
10134 PVMCPU pVCpu = VMMGetCpu(pVM);
10135 if (!pVCpu)
10136 return;
10137 PIEMCPU pIemCpu = &pVCpu->iem.s;
10138 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10139 if (!pEvtRec)
10140 return;
10141 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10142 pEvtRec->u.IOPortRead.Port = Port;
10143 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10144 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10145 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10146}
10147
10148/**
10149 * IOMIOPortWrite notification.
10150 */
10151VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10152{
10153 PVMCPU pVCpu = VMMGetCpu(pVM);
10154 if (!pVCpu)
10155 return;
10156 PIEMCPU pIemCpu = &pVCpu->iem.s;
10157 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10158 if (!pEvtRec)
10159 return;
10160 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10161 pEvtRec->u.IOPortWrite.Port = Port;
10162 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10163 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10164 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10165 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10166}
10167
10168
10169VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10170{
10171 PVMCPU pVCpu = VMMGetCpu(pVM);
10172 if (!pVCpu)
10173 return;
10174 PIEMCPU pIemCpu = &pVCpu->iem.s;
10175 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10176 if (!pEvtRec)
10177 return;
10178 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
10179 pEvtRec->u.IOPortStrRead.Port = Port;
10180 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
10181 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
10182 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10183 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10184}
10185
10186
10187VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10188{
10189 PVMCPU pVCpu = VMMGetCpu(pVM);
10190 if (!pVCpu)
10191 return;
10192 PIEMCPU pIemCpu = &pVCpu->iem.s;
10193 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10194 if (!pEvtRec)
10195 return;
10196 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
10197 pEvtRec->u.IOPortStrWrite.Port = Port;
10198 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
10199 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
10200 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10201 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10202}
10203
10204
10205/**
10206 * Fakes and records an I/O port read.
10207 *
10208 * @returns VINF_SUCCESS.
10209 * @param pIemCpu The IEM per CPU data.
10210 * @param Port The I/O port.
10211 * @param pu32Value Where to store the fake value.
10212 * @param cbValue The size of the access.
10213 */
10214IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10215{
10216 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10217 if (pEvtRec)
10218 {
10219 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10220 pEvtRec->u.IOPortRead.Port = Port;
10221 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10222 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10223 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10224 }
10225 pIemCpu->cIOReads++;
10226 *pu32Value = 0xcccccccc;
10227 return VINF_SUCCESS;
10228}
10229
10230
10231/**
10232 * Fakes and records an I/O port write.
10233 *
10234 * @returns VINF_SUCCESS.
10235 * @param pIemCpu The IEM per CPU data.
10236 * @param Port The I/O port.
10237 * @param u32Value The value being written.
10238 * @param cbValue The size of the access.
10239 */
10240IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10241{
10242 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10243 if (pEvtRec)
10244 {
10245 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10246 pEvtRec->u.IOPortWrite.Port = Port;
10247 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10248 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10249 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10250 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10251 }
10252 pIemCpu->cIOWrites++;
10253 return VINF_SUCCESS;
10254}
10255
10256
10257/**
10258 * Used to add extra details about a stub case.
10259 * @param pIemCpu The IEM per CPU state.
10260 */
10261IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10262{
10263 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10264 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10265 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10266 char szRegs[4096];
10267 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10268 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10269 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10270 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10271 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10272 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10273 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10274 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10275 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10276 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10277 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10278 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10279 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10280 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10281 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10282 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10283 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10284 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10285 " efer=%016VR{efer}\n"
10286 " pat=%016VR{pat}\n"
10287 " sf_mask=%016VR{sf_mask}\n"
10288 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10289 " lstar=%016VR{lstar}\n"
10290 " star=%016VR{star} cstar=%016VR{cstar}\n"
10291 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10292 );
10293
10294 char szInstr1[256];
10295 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10296 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10297 szInstr1, sizeof(szInstr1), NULL);
10298 char szInstr2[256];
10299 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10300 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10301 szInstr2, sizeof(szInstr2), NULL);
10302
10303 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10304}
10305
10306
10307/**
10308 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10309 * dump to the assertion info.
10310 *
10311 * @param pEvtRec The record to dump.
10312 */
10313IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10314{
10315 switch (pEvtRec->enmEvent)
10316 {
10317 case IEMVERIFYEVENT_IOPORT_READ:
10318 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10319 pEvtRec->u.IOPortWrite.Port,
10320 pEvtRec->u.IOPortWrite.cbValue);
10321 break;
10322 case IEMVERIFYEVENT_IOPORT_WRITE:
10323 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10324 pEvtRec->u.IOPortWrite.Port,
10325 pEvtRec->u.IOPortWrite.cbValue,
10326 pEvtRec->u.IOPortWrite.u32Value);
10327 break;
10328 case IEMVERIFYEVENT_IOPORT_STR_READ:
10329 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
10330 pEvtRec->u.IOPortStrWrite.Port,
10331 pEvtRec->u.IOPortStrWrite.cbValue,
10332 pEvtRec->u.IOPortStrWrite.cTransfers);
10333 break;
10334 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10335 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
10336 pEvtRec->u.IOPortStrWrite.Port,
10337 pEvtRec->u.IOPortStrWrite.cbValue,
10338 pEvtRec->u.IOPortStrWrite.cTransfers);
10339 break;
10340 case IEMVERIFYEVENT_RAM_READ:
10341 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10342 pEvtRec->u.RamRead.GCPhys,
10343 pEvtRec->u.RamRead.cb);
10344 break;
10345 case IEMVERIFYEVENT_RAM_WRITE:
10346 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10347 pEvtRec->u.RamWrite.GCPhys,
10348 pEvtRec->u.RamWrite.cb,
10349 (int)pEvtRec->u.RamWrite.cb,
10350 pEvtRec->u.RamWrite.ab);
10351 break;
10352 default:
10353 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10354 break;
10355 }
10356}
10357
10358
10359/**
10360 * Raises an assertion on the specified record, showing the given message with
10361 * a record dump attached.
10362 *
10363 * @param pIemCpu The IEM per CPU data.
10364 * @param pEvtRec1 The first record.
10365 * @param pEvtRec2 The second record.
10366 * @param pszMsg The message explaining why we're asserting.
10367 */
10368IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10369{
10370 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10371 iemVerifyAssertAddRecordDump(pEvtRec1);
10372 iemVerifyAssertAddRecordDump(pEvtRec2);
10373 iemVerifyAssertMsg2(pIemCpu);
10374 RTAssertPanic();
10375}
10376
10377
10378/**
10379 * Raises an assertion on the specified record, showing the given message with
10380 * a record dump attached.
10381 *
10382 * @param pIemCpu The IEM per CPU data.
10383 * @param pEvtRec The record.
10384 * @param pszMsg The message explaining why we're asserting.
10385 */
10386IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10387{
10388 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10389 iemVerifyAssertAddRecordDump(pEvtRec);
10390 iemVerifyAssertMsg2(pIemCpu);
10391 RTAssertPanic();
10392}
10393
10394
10395/**
10396 * Verifies a write record.
10397 *
10398 * @param pIemCpu The IEM per CPU data.
10399 * @param pEvtRec The write record.
10400 * @param fRem Set if REM did the other execution. If clear,
10401 * it was HM.
10402 */
10403IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10404{
10405 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10406 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10407 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10408 if ( RT_FAILURE(rc)
10409 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10410 {
10411 /* fend off ins */
10412 if ( !pIemCpu->cIOReads
10413 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10414 || ( pEvtRec->u.RamWrite.cb != 1
10415 && pEvtRec->u.RamWrite.cb != 2
10416 && pEvtRec->u.RamWrite.cb != 4) )
10417 {
10418 /* fend off ROMs and MMIO */
10419 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10420 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10421 {
10422 /* fend off fxsave */
10423 if (pEvtRec->u.RamWrite.cb != 512)
10424 {
10425 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10426 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10427 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10428 RTAssertMsg2Add("%s: %.*Rhxs\n"
10429 "iem: %.*Rhxs\n",
10430 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10431 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10432 iemVerifyAssertAddRecordDump(pEvtRec);
10433 iemVerifyAssertMsg2(pIemCpu);
10434 RTAssertPanic();
10435 }
10436 }
10437 }
10438 }
10439
10440}
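/*
 * Note, added for illustration and not part of the original file: the two
 * unsigned range checks above use the subtract-and-compare idiom.  An
 * address inside the window yields a difference no larger than the window
 * size, while anything below the base wraps around to a huge unsigned value, so
 *
 *     GCPhys - 0x000a0000 > 0x60000   excludes roughly 0x000a0000..0x000fffff (VGA/BIOS)
 *     GCPhys - 0xfffc0000 > 0x40000   excludes 0xfffc0000..0xffffffff (flash/BIOS)
 *
 * keeping ROM and MMIO-backed writes out of the comparison.
 */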
10441
10442/**
10443 * Performs the post-execution verification checks.
10444 */
10445IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10446{
10447 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10448 return;
10449
10450 /*
10451 * Switch back the state.
10452 */
10453 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10454 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10455 Assert(pOrgCtx != pDebugCtx);
10456 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10457
10458 /*
10459 * Execute the instruction in REM.
10460 */
10461 bool fRem = false;
10462 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10463 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10464 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10465#ifdef IEM_VERIFICATION_MODE_FULL_HM
10466 if ( HMIsEnabled(pVM)
10467 && pIemCpu->cIOReads == 0
10468 && pIemCpu->cIOWrites == 0
10469 && !pIemCpu->fProblematicMemory)
10470 {
10471 uint64_t uStartRip = pOrgCtx->rip;
10472 unsigned iLoops = 0;
10473 do
10474 {
10475 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10476 iLoops++;
10477 } while ( rc == VINF_SUCCESS
10478 || ( rc == VINF_EM_DBG_STEPPED
10479 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10480 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10481 || ( pOrgCtx->rip != pDebugCtx->rip
10482 && pIemCpu->uInjectCpl != UINT8_MAX
10483 && iLoops < 8) );
10484 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10485 rc = VINF_SUCCESS;
10486 }
10487#endif
10488 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10489 || rc == VINF_IOM_R3_IOPORT_READ
10490 || rc == VINF_IOM_R3_IOPORT_WRITE
10491 || rc == VINF_IOM_R3_MMIO_READ
10492 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10493 || rc == VINF_IOM_R3_MMIO_WRITE
10494 || rc == VINF_CPUM_R3_MSR_READ
10495 || rc == VINF_CPUM_R3_MSR_WRITE
10496 || rc == VINF_EM_RESCHEDULE
10497 )
10498 {
10499 EMRemLock(pVM);
10500 rc = REMR3EmulateInstruction(pVM, pVCpu);
10501 AssertRC(rc);
10502 EMRemUnlock(pVM);
10503 fRem = true;
10504 }
10505
10506 /*
10507 * Compare the register states.
10508 */
10509 unsigned cDiffs = 0;
10510 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10511 {
10512 //Log(("REM and IEM ends up with different registers!\n"));
10513 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10514
10515# define CHECK_FIELD(a_Field) \
10516 do \
10517 { \
10518 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10519 { \
10520 switch (sizeof(pOrgCtx->a_Field)) \
10521 { \
10522 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10523 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10524 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10525 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10526 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10527 } \
10528 cDiffs++; \
10529 } \
10530 } while (0)
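/* Illustrative expansion, added for illustration and not part of the original
   file: CHECK_FIELD(rip) compares pOrgCtx->rip (what HM/REM produced) against
   pDebugCtx->rip (what IEM produced) and, rip being 8 bytes wide, prints
   something along the lines of
       rip differs - iem=000000000040152a - rem=000000000040152d
   before bumping cDiffs so the mismatch trips the assertion further down. */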
10531# define CHECK_XSTATE_FIELD(a_Field) \
10532 do \
10533 { \
10534 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10535 { \
10536 switch (sizeof(pOrgXState->a_Field)) \
10537 { \
10538 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10539 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10540 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10541 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10542 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10543 } \
10544 cDiffs++; \
10545 } \
10546 } while (0)
10547
10548# define CHECK_BIT_FIELD(a_Field) \
10549 do \
10550 { \
10551 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10552 { \
10553 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10554 cDiffs++; \
10555 } \
10556 } while (0)
10557
10558# define CHECK_SEL(a_Sel) \
10559 do \
10560 { \
10561 CHECK_FIELD(a_Sel.Sel); \
10562 CHECK_FIELD(a_Sel.Attr.u); \
10563 CHECK_FIELD(a_Sel.u64Base); \
10564 CHECK_FIELD(a_Sel.u32Limit); \
10565 CHECK_FIELD(a_Sel.fFlags); \
10566 } while (0)
10567
10568 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10569 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10570
10571#if 1 /* The recompiler doesn't update these the intel way. */
10572 if (fRem)
10573 {
10574 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10575 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10576 pOrgXState->x87.CS = pDebugXState->x87.CS;
10577 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10578 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10579 pOrgXState->x87.DS = pDebugXState->x87.DS;
10580 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10581 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10582 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10583 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10584 }
10585#endif
10586 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10587 {
10588 RTAssertMsg2Weak(" the FPU state differs\n");
10589 cDiffs++;
10590 CHECK_XSTATE_FIELD(x87.FCW);
10591 CHECK_XSTATE_FIELD(x87.FSW);
10592 CHECK_XSTATE_FIELD(x87.FTW);
10593 CHECK_XSTATE_FIELD(x87.FOP);
10594 CHECK_XSTATE_FIELD(x87.FPUIP);
10595 CHECK_XSTATE_FIELD(x87.CS);
10596 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10597 CHECK_XSTATE_FIELD(x87.FPUDP);
10598 CHECK_XSTATE_FIELD(x87.DS);
10599 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10600 CHECK_XSTATE_FIELD(x87.MXCSR);
10601 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10602 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10603 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10604 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10605 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10606 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10607 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10608 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10609 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10610 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10611 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10612 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10613 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10614 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10615 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10616 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10617 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10618 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10619 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10620 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10621 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10622 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10623 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10624 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10625 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10626 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10627 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10628 }
10629 CHECK_FIELD(rip);
10630 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10631 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10632 {
10633 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10634 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10635 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10636 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10637 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10638 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10639 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10640 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10641 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10642 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10643 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10644 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10645 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10646 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10647 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10648 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10649 if (0 && !fRem) /** @todo debug the occasionally cleared RF flag when running against VT-x. */
10650 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10651 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10652 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10653 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10654 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10655 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10656 }
10657
10658 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10659 CHECK_FIELD(rax);
10660 CHECK_FIELD(rcx);
10661 if (!pIemCpu->fIgnoreRaxRdx)
10662 CHECK_FIELD(rdx);
10663 CHECK_FIELD(rbx);
10664 CHECK_FIELD(rsp);
10665 CHECK_FIELD(rbp);
10666 CHECK_FIELD(rsi);
10667 CHECK_FIELD(rdi);
10668 CHECK_FIELD(r8);
10669 CHECK_FIELD(r9);
10670 CHECK_FIELD(r10);
10671 CHECK_FIELD(r11);
10672 CHECK_FIELD(r12);
10673 CHECK_FIELD(r13);
10674 CHECK_SEL(cs);
10675 CHECK_SEL(ss);
10676 CHECK_SEL(ds);
10677 CHECK_SEL(es);
10678 CHECK_SEL(fs);
10679 CHECK_SEL(gs);
10680 CHECK_FIELD(cr0);
10681
10682 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10683 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10684 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
10685 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10686 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10687 {
10688 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10689 { /* ignore */ }
10690 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10691 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10692 && fRem)
10693 { /* ignore */ }
10694 else
10695 CHECK_FIELD(cr2);
10696 }
10697 CHECK_FIELD(cr3);
10698 CHECK_FIELD(cr4);
10699 CHECK_FIELD(dr[0]);
10700 CHECK_FIELD(dr[1]);
10701 CHECK_FIELD(dr[2]);
10702 CHECK_FIELD(dr[3]);
10703 CHECK_FIELD(dr[6]);
10704 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10705 CHECK_FIELD(dr[7]);
10706 CHECK_FIELD(gdtr.cbGdt);
10707 CHECK_FIELD(gdtr.pGdt);
10708 CHECK_FIELD(idtr.cbIdt);
10709 CHECK_FIELD(idtr.pIdt);
10710 CHECK_SEL(ldtr);
10711 CHECK_SEL(tr);
10712 CHECK_FIELD(SysEnter.cs);
10713 CHECK_FIELD(SysEnter.eip);
10714 CHECK_FIELD(SysEnter.esp);
10715 CHECK_FIELD(msrEFER);
10716 CHECK_FIELD(msrSTAR);
10717 CHECK_FIELD(msrPAT);
10718 CHECK_FIELD(msrLSTAR);
10719 CHECK_FIELD(msrCSTAR);
10720 CHECK_FIELD(msrSFMASK);
10721 CHECK_FIELD(msrKERNELGSBASE);
10722
10723 if (cDiffs != 0)
10724 {
10725 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10726 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10727 iemVerifyAssertMsg2(pIemCpu);
10728 RTAssertPanic();
10729 }
10730# undef CHECK_FIELD
10731# undef CHECK_BIT_FIELD
10732 }
10733
10734 /*
10735 * If the register state compared fine, check the verification event
10736 * records.
10737 */
10738 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10739 {
10740 /*
10741 * Compare verification event records.
10742 * - I/O port accesses should be a 1:1 match.
10743 */
10744 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10745 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10746 while (pIemRec && pOtherRec)
10747 {
10748 /* Since the other engine might miss RAM writes and reads, skip IEM-only
10749 RAM records here, but verify that any skipped writes reached guest memory. */
10750 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10751 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10752 && pIemRec->pNext)
10753 {
10754 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10755 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10756 pIemRec = pIemRec->pNext;
10757 }
10758
10759 /* Do the compare. */
10760 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10761 {
10762 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10763 break;
10764 }
10765 bool fEquals;
10766 switch (pIemRec->enmEvent)
10767 {
10768 case IEMVERIFYEVENT_IOPORT_READ:
10769 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10770 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10771 break;
10772 case IEMVERIFYEVENT_IOPORT_WRITE:
10773 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10774 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10775 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10776 break;
10777 case IEMVERIFYEVENT_IOPORT_STR_READ:
10778 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
10779 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
10780 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
10781 break;
10782 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10783 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
10784 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
10785 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
10786 break;
10787 case IEMVERIFYEVENT_RAM_READ:
10788 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10789 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10790 break;
10791 case IEMVERIFYEVENT_RAM_WRITE:
10792 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10793 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10794 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10795 break;
10796 default:
10797 fEquals = false;
10798 break;
10799 }
10800 if (!fEquals)
10801 {
10802 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10803 break;
10804 }
10805
10806 /* advance */
10807 pIemRec = pIemRec->pNext;
10808 pOtherRec = pOtherRec->pNext;
10809 }
10810
10811 /* Ignore extra writes and reads. */
10812 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10813 {
10814 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10815 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10816 pIemRec = pIemRec->pNext;
10817 }
10818 if (pIemRec != NULL)
10819 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10820 else if (pOtherRec != NULL)
10821 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10822 }
10823 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10824}
10825
10826#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10827
10828/* stubs */
10829IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10830{
10831 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10832 return VERR_INTERNAL_ERROR;
10833}
10834
10835IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10836{
10837 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10838 return VERR_INTERNAL_ERROR;
10839}
10840
10841#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10842
10843
10844#ifdef LOG_ENABLED
10845/**
10846 * Logs the current instruction.
10847 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10848 * @param pCtx The current CPU context.
10849 * @param fSameCtx Set if we have the same context information as the VMM,
10850 * clear if we may have already executed an instruction in
10851 * our debug context. When clear, we assume IEMCPU holds
10852 * valid CPU mode info.
10853 */
10854IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10855{
10856# ifdef IN_RING3
10857 if (LogIs2Enabled())
10858 {
10859 char szInstr[256];
10860 uint32_t cbInstr = 0;
10861 if (fSameCtx)
10862 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10863 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10864 szInstr, sizeof(szInstr), &cbInstr);
10865 else
10866 {
10867 uint32_t fFlags = 0;
10868 switch (pVCpu->iem.s.enmCpuMode)
10869 {
10870 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10871 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10872 case IEMMODE_16BIT:
10873 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10874 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10875 else
10876 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10877 break;
10878 }
10879 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10880 szInstr, sizeof(szInstr), &cbInstr);
10881 }
10882
10883 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10884 Log2(("****\n"
10885 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10886 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10887 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10888 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10889 " %s\n"
10890 ,
10891 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10892 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10893 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10894 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10895 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10896 szInstr));
10897
10898 if (LogIs3Enabled())
10899 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10900 }
10901 else
10902# endif
10903 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10904 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10905}
10906#endif
10907
10908
10909/**
10910 * Makes status code adjustments (pass up from I/O and access handlers)
10911 * as well as maintaining statistics.
10912 *
10913 * @returns Strict VBox status code to pass up.
10914 * @param pIemCpu The IEM per CPU data.
10915 * @param rcStrict The status from executing an instruction.
10916 */
10917DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10918{
10919 if (rcStrict != VINF_SUCCESS)
10920 {
10921 if (RT_SUCCESS(rcStrict))
10922 {
10923 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10924 || rcStrict == VINF_IOM_R3_IOPORT_READ
10925 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10926 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
10927 || rcStrict == VINF_IOM_R3_MMIO_READ
10928 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10929 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10930 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
10931 || rcStrict == VINF_CPUM_R3_MSR_READ
10932 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10933 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10934 || rcStrict == VINF_EM_RAW_TO_R3
10935 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10936 /* raw-mode / virt handlers only: */
10937 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10938 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10939 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10940 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10941 || rcStrict == VINF_SELM_SYNC_GDT
10942 || rcStrict == VINF_CSAM_PENDING_ACTION
10943 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10944 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10945/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10946 int32_t const rcPassUp = pIemCpu->rcPassUp;
10947 if (rcPassUp == VINF_SUCCESS)
10948 pIemCpu->cRetInfStatuses++;
10949 else if ( rcPassUp < VINF_EM_FIRST
10950 || rcPassUp > VINF_EM_LAST
10951 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10952 {
10953 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10954 pIemCpu->cRetPassUpStatus++;
10955 rcStrict = rcPassUp;
10956 }
10957 else
10958 {
10959 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10960 pIemCpu->cRetInfStatuses++;
10961 }
10962 }
10963 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10964 pIemCpu->cRetAspectNotImplemented++;
10965 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10966 pIemCpu->cRetInstrNotImplemented++;
10967#ifdef IEM_VERIFICATION_MODE_FULL
10968 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10969 rcStrict = VINF_SUCCESS;
10970#endif
10971 else
10972 pIemCpu->cRetErrStatuses++;
10973 }
10974 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10975 {
10976 pIemCpu->cRetPassUpStatus++;
10977 rcStrict = pIemCpu->rcPassUp;
10978 }
10979
10980 return rcStrict;
10981}
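
/*
 * A minimal usage sketch (hypothetical, compiled out): when the instruction itself
 * returns VINF_SUCCESS but a nested access handler left a status in rcPassUp, the
 * fiddling above hands back the stashed status instead.  VINF_EM_RAW_TO_R3 is only
 * an example value here.
 */
#if 0
static VBOXSTRICTRC iemExampleStatusFiddling(PIEMCPU pIemCpu)
{
    pIemCpu->rcPassUp = VINF_EM_RAW_TO_R3;                   /* e.g. left behind by a commit */
    return iemExecStatusCodeFiddling(pIemCpu, VINF_SUCCESS); /* returns VINF_EM_RAW_TO_R3 */
}
#endif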
10982
10983
10984/**
10985 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10986 * IEMExecOneWithPrefetchedByPC.
10987 *
10988 * @return Strict VBox status code.
10989 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10990 * @param pIemCpu The IEM per CPU data.
10991 * @param fExecuteInhibit If set, execute the instruction following CLI,
10992 * POP SS and MOV SS,GR.
10993 */
10994DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10995{
10996 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10997 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10998 if (rcStrict == VINF_SUCCESS)
10999 pIemCpu->cInstructions++;
11000 if (pIemCpu->cActiveMappings > 0)
11001 iemMemRollback(pIemCpu);
11002//#ifdef DEBUG
11003// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
11004//#endif
11005
11006 /* Execute the next instruction as well if a cli, pop ss or
11007 mov ss, Gr has just completed successfully. */
11008 if ( fExecuteInhibit
11009 && rcStrict == VINF_SUCCESS
11010 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
11011 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
11012 {
11013 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
11014 if (rcStrict == VINF_SUCCESS)
11015 {
11016# ifdef LOG_ENABLED
11017 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
11018# endif
11019 IEM_OPCODE_GET_NEXT_U8(&b);
11020 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
11021 if (rcStrict == VINF_SUCCESS)
11022 pIemCpu->cInstructions++;
11023 if (pIemCpu->cActiveMappings > 0)
11024 iemMemRollback(pIemCpu);
11025 }
11026 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
11027 }
11028
11029 /*
11030 * Return value fiddling, statistics and sanity assertions.
11031 */
11032 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11033
11034 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
11035 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
11036#if defined(IEM_VERIFICATION_MODE_FULL)
11037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
11038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
11039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
11040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
11041#endif
11042 return rcStrict;
11043}
11044
11045
11046#ifdef IN_RC
11047/**
11048 * Re-enters raw-mode or ensures we return to ring-3.
11049 *
11050 * @returns rcStrict, maybe modified.
11051 * @param pIemCpu The IEM CPU structure.
11052 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11053 * @param pCtx The current CPU context.
11054 * @param rcStrict The status code returned by the interpreter.
11055 */
11056DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
11057{
11058 if (!pIemCpu->fInPatchCode)
11059 CPUMRawEnter(pVCpu);
11060 return rcStrict;
11061}
11062#endif
11063
11064
11065/**
11066 * Execute one instruction.
11067 *
11068 * @return Strict VBox status code.
11069 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11070 */
11071VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
11072{
11073 PIEMCPU pIemCpu = &pVCpu->iem.s;
11074
11075#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11076 iemExecVerificationModeSetup(pIemCpu);
11077#endif
11078#ifdef LOG_ENABLED
11079 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11080 iemLogCurInstr(pVCpu, pCtx, true);
11081#endif
11082
11083 /*
11084 * Do the decoding and emulation.
11085 */
11086 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11087 if (rcStrict == VINF_SUCCESS)
11088 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11089
11090#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11091 /*
11092 * Assert some sanity.
11093 */
11094 iemExecVerificationModeCheck(pIemCpu);
11095#endif
11096#ifdef IN_RC
11097 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11098#endif
11099 if (rcStrict != VINF_SUCCESS)
11100 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11101 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11102 return rcStrict;
11103}
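
/*
 * A minimal caller sketch (hypothetical, compiled out): single-stepping the guest
 * by invoking IEMExecOne repeatedly until it returns anything but VINF_SUCCESS.
 * The instruction cap is purely illustrative.
 */
#if 0
static VBOXSTRICTRC iemExampleSingleStep(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxInstructions-- > 0 && rcStrict == VINF_SUCCESS)
        rcStrict = IEMExecOne(pVCpu);  /* decodes and executes exactly one instruction */
    return rcStrict;
}
#endif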
11104
11105
11106VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11107{
11108 PIEMCPU pIemCpu = &pVCpu->iem.s;
11109 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11110 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11111
11112 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11113 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11114 if (rcStrict == VINF_SUCCESS)
11115 {
11116 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11117 if (pcbWritten)
11118 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11119 }
11120
11121#ifdef IN_RC
11122 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11123#endif
11124 return rcStrict;
11125}
11126
11127
11128VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11129 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11130{
11131 PIEMCPU pIemCpu = &pVCpu->iem.s;
11132 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11133 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11134
11135 VBOXSTRICTRC rcStrict;
11136 if ( cbOpcodeBytes
11137 && pCtx->rip == OpcodeBytesPC)
11138 {
11139 iemInitDecoder(pIemCpu, false);
11140 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11141 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11142 rcStrict = VINF_SUCCESS;
11143 }
11144 else
11145 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11146 if (rcStrict == VINF_SUCCESS)
11147 {
11148 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11149 }
11150
11151#ifdef IN_RC
11152 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11153#endif
11154 return rcStrict;
11155}
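
/*
 * A minimal sketch (hypothetical, compiled out) of the prefetched variant: when the
 * caller already holds the opcode bytes at the current RIP (a single NOP is assumed
 * here purely for illustration), decoding skips the guest memory fetch.
 */
#if 0
static VBOXSTRICTRC iemExamplePrefetched(PVMCPU pVCpu)
{
    PCPUMCTX      pCtx       = pVCpu->iem.s.CTX_SUFF(pCtx);
    uint8_t const abOpcode[] = { 0x90 }; /* nop */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
                                        abOpcode, sizeof(abOpcode));
}
#endif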
11156
11157
11158VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11159{
11160 PIEMCPU pIemCpu = &pVCpu->iem.s;
11161 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11162 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11163
11164 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11165 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11166 if (rcStrict == VINF_SUCCESS)
11167 {
11168 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11169 if (pcbWritten)
11170 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11171 }
11172
11173#ifdef IN_RC
11174 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11175#endif
11176 return rcStrict;
11177}
11178
11179
11180VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11181 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11182{
11183 PIEMCPU pIemCpu = &pVCpu->iem.s;
11184 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11185 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11186
11187 VBOXSTRICTRC rcStrict;
11188 if ( cbOpcodeBytes
11189 && pCtx->rip == OpcodeBytesPC)
11190 {
11191 iemInitDecoder(pIemCpu, true);
11192 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11193 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11194 rcStrict = VINF_SUCCESS;
11195 }
11196 else
11197 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11198 if (rcStrict == VINF_SUCCESS)
11199 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11200
11201#ifdef IN_RC
11202 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11203#endif
11204 return rcStrict;
11205}
11206
11207
11208VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11209{
11210 PIEMCPU pIemCpu = &pVCpu->iem.s;
11211
11212 /*
11213 * See if there is an interrupt pending in TRPM and inject it if we can.
11214 */
11215#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11216 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11217# ifdef IEM_VERIFICATION_MODE_FULL
11218 pIemCpu->uInjectCpl = UINT8_MAX;
11219# endif
11220 if ( pCtx->eflags.Bits.u1IF
11221 && TRPMHasTrap(pVCpu)
11222 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11223 {
11224 uint8_t u8TrapNo;
11225 TRPMEVENT enmType;
11226 RTGCUINT uErrCode;
11227 RTGCPTR uCr2;
11228 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11229 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11230 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11231 TRPMResetTrap(pVCpu);
11232 }
11233#else
11234 iemExecVerificationModeSetup(pIemCpu);
11235 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11236#endif
11237
11238 /*
11239 * Log the state.
11240 */
11241#ifdef LOG_ENABLED
11242 iemLogCurInstr(pVCpu, pCtx, true);
11243#endif
11244
11245 /*
11246 * Do the decoding and emulation.
11247 */
11248 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11249 if (rcStrict == VINF_SUCCESS)
11250 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11251
11252#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11253 /*
11254 * Assert some sanity.
11255 */
11256 iemExecVerificationModeCheck(pIemCpu);
11257#endif
11258
11259 /*
11260 * Maybe re-enter raw-mode and log.
11261 */
11262#ifdef IN_RC
11263 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11264#endif
11265 if (rcStrict != VINF_SUCCESS)
11266 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11267 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11268 return rcStrict;
11269}
11270
11271
11272
11273/**
11274 * Injects a trap, fault, abort, software interrupt or external interrupt.
11275 *
11276 * The parameter list matches TRPMQueryTrapAll pretty closely.
11277 *
11278 * @returns Strict VBox status code.
11279 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11280 * @param u8TrapNo The trap number.
11281 * @param enmType What type is it (trap/fault/abort), software
11282 * interrupt or hardware interrupt.
11283 * @param uErrCode The error code if applicable.
11284 * @param uCr2 The CR2 value if applicable.
11285 * @param cbInstr The instruction length (only relevant for
11286 * software interrupts).
11287 */
11288VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11289 uint8_t cbInstr)
11290{
11291 iemInitDecoder(&pVCpu->iem.s, false);
11292#ifdef DBGFTRACE_ENABLED
11293 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11294 u8TrapNo, enmType, uErrCode, uCr2);
11295#endif
11296
11297 uint32_t fFlags;
11298 switch (enmType)
11299 {
11300 case TRPM_HARDWARE_INT:
11301 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11302 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11303 uErrCode = uCr2 = 0;
11304 break;
11305
11306 case TRPM_SOFTWARE_INT:
11307 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11308 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11309 uErrCode = uCr2 = 0;
11310 break;
11311
11312 case TRPM_TRAP:
11313 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11314 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11315 if (u8TrapNo == X86_XCPT_PF)
11316 fFlags |= IEM_XCPT_FLAGS_CR2;
11317 switch (u8TrapNo)
11318 {
11319 case X86_XCPT_DF:
11320 case X86_XCPT_TS:
11321 case X86_XCPT_NP:
11322 case X86_XCPT_SS:
11323 case X86_XCPT_PF:
11324 case X86_XCPT_AC:
11325 fFlags |= IEM_XCPT_FLAGS_ERR;
11326 break;
11327
11328 case X86_XCPT_NMI:
11329 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11330 break;
11331 }
11332 break;
11333
11334 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11335 }
11336
11337 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11338}
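
/*
 * A minimal caller sketch (hypothetical, compiled out): injecting an external
 * hardware interrupt.  As documented above, the error code, CR2 and instruction
 * length arguments are not used for TRPM_HARDWARE_INT.
 */
#if 0
static VBOXSTRICTRC iemExampleInjectIrq(PVMCPU pVCpu, uint8_t u8Vector)
{
    return IEMInjectTrap(pVCpu, u8Vector, TRPM_HARDWARE_INT,
                         0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif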
11339
11340
11341/**
11342 * Injects the active TRPM event.
11343 *
11344 * @returns Strict VBox status code.
11345 * @param pVCpu The cross context virtual CPU structure.
11346 */
11347VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11348{
11349#ifndef IEM_IMPLEMENTS_TASKSWITCH
11350 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11351#else
11352 uint8_t u8TrapNo;
11353 TRPMEVENT enmType;
11354 RTGCUINT uErrCode;
11355 RTGCUINTPTR uCr2;
11356 uint8_t cbInstr;
11357 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11358 if (RT_FAILURE(rc))
11359 return rc;
11360
11361 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11362
11363 /** @todo Are there any other codes that imply the event was successfully
11364 * delivered to the guest? See @bugref{6607}. */
11365 if ( rcStrict == VINF_SUCCESS
11366 || rcStrict == VINF_IEM_RAISED_XCPT)
11367 {
11368 TRPMResetTrap(pVCpu);
11369 }
11370 return rcStrict;
11371#endif
11372}
11373
11374
11375VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11376{
11377 return VERR_NOT_IMPLEMENTED;
11378}
11379
11380
11381VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11382{
11383 return VERR_NOT_IMPLEMENTED;
11384}
11385
11386
11387#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11388/**
11389 * Executes an IRET instruction with default operand size.
11390 *
11391 * This is for PATM.
11392 *
11393 * @returns VBox status code.
11394 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11395 * @param pCtxCore The register frame.
11396 */
11397VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11398{
11399 PIEMCPU pIemCpu = &pVCpu->iem.s;
11400 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11401
11402 iemCtxCoreToCtx(pCtx, pCtxCore);
11403 iemInitDecoder(pIemCpu);
11404 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11405 if (rcStrict == VINF_SUCCESS)
11406 iemCtxToCtxCore(pCtxCore, pCtx);
11407 else
11408 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11409 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11410 return rcStrict;
11411}
11412#endif
11413
11414
11415/**
11416 * Macro used by the IEMExec* method to check the given instruction length.
11417 *
11418 * Will return on failure!
11419 *
11420 * @param a_cbInstr The given instruction length.
11421 * @param a_cbMin The minimum length.
11422 */
11423#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11424 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11425 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
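
/*
 * A worked example (hypothetical, compiled out) of the unsigned trick above: the
 * subtraction maps valid lengths a_cbMin..15 onto 0..15 - a_cbMin, while anything
 * below a_cbMin wraps around to a huge value, so a single compare covers both bounds.
 */
#if 0
static void iemExampleInstrLenCheck(void)
{
    unsigned const cbMin = 1;
    for (unsigned cbInstr = 0; cbInstr <= 20; cbInstr++)
    {
        bool const fValid = (unsigned)cbInstr - cbMin <= (unsigned)15 - cbMin;
        Assert(fValid == (cbInstr >= cbMin && cbInstr <= 15));
    }
}
#endif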
11426
11427
11428/**
11429 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
11430 *
11431 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
11432 *
11433 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
11434 * @param pIemCpu The IEM per-CPU structure.
11435 * @param rcStrict The status code to fiddle.
11436 */
11437DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
11438{
11439 iemUninitExec(pIemCpu);
11440#ifdef IN_RC
11441 return iemRCRawMaybeReenter(pIemCpu, IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx),
11442 iemExecStatusCodeFiddling(pIemCpu, rcStrict));
11443#else
11444 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11445#endif
11446}
11447
11448
11449/**
11450 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11451 *
11452 * This API ASSUMES that the caller has already verified that the guest code is
11453 * allowed to access the I/O port. (The I/O port is in the DX register in the
11454 * guest state.)
11455 *
11456 * @returns Strict VBox status code.
11457 * @param pVCpu The cross context virtual CPU structure.
11458 * @param cbValue The size of the I/O port access (1, 2, or 4).
11459 * @param enmAddrMode The addressing mode.
11460 * @param fRepPrefix Indicates whether a repeat prefix is used
11461 * (doesn't matter which for this instruction).
11462 * @param cbInstr The instruction length in bytes.
11463 * @param iEffSeg The effective segment register number.
11464 * @param fIoChecked Whether the access to the I/O port has been
11465 * checked or not. It's typically checked in the
11466 * HM scenario.
11467 */
11468VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11469 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
11470{
11471 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11472 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11473
11474 /*
11475 * State init.
11476 */
11477 PIEMCPU pIemCpu = &pVCpu->iem.s;
11478 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11479
11480 /*
11481 * Switch orgy for getting to the right handler.
11482 */
11483 VBOXSTRICTRC rcStrict;
11484 if (fRepPrefix)
11485 {
11486 switch (enmAddrMode)
11487 {
11488 case IEMMODE_16BIT:
11489 switch (cbValue)
11490 {
11491 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11492 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11493 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11494 default:
11495 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11496 }
11497 break;
11498
11499 case IEMMODE_32BIT:
11500 switch (cbValue)
11501 {
11502 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11503 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11504 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11505 default:
11506 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11507 }
11508 break;
11509
11510 case IEMMODE_64BIT:
11511 switch (cbValue)
11512 {
11513 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11514 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11515 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11516 default:
11517 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11518 }
11519 break;
11520
11521 default:
11522 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11523 }
11524 }
11525 else
11526 {
11527 switch (enmAddrMode)
11528 {
11529 case IEMMODE_16BIT:
11530 switch (cbValue)
11531 {
11532 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11533 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11534 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11535 default:
11536 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11537 }
11538 break;
11539
11540 case IEMMODE_32BIT:
11541 switch (cbValue)
11542 {
11543 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11544 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11545 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11546 default:
11547 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11548 }
11549 break;
11550
11551 case IEMMODE_64BIT:
11552 switch (cbValue)
11553 {
11554 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11555 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11556 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11557 default:
11558 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11559 }
11560 break;
11561
11562 default:
11563 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11564 }
11565 }
11566
11567 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11568}
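
/*
 * A minimal caller sketch (hypothetical, compiled out): how an HM I/O exit handler
 * might forward a "rep outsb" with 32-bit addressing and DS as the effective
 * segment.  The cbInstr value is assumed to come from the exit information.
 */
#if 0
static VBOXSTRICTRC iemExampleRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif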
11569
11570
11571/**
11572 * Interface for HM and EM for executing string I/O IN (read) instructions.
11573 *
11574 * This API ASSUMES that the caller has already verified that the guest code is
11575 * allowed to access the I/O port. (The I/O port is in the DX register in the
11576 * guest state.)
11577 *
11578 * @returns Strict VBox status code.
11579 * @param pVCpu The cross context virtual CPU structure.
11580 * @param cbValue The size of the I/O port access (1, 2, or 4).
11581 * @param enmAddrMode The addressing mode.
11582 * @param fRepPrefix Indicates whether a repeat prefix is used
11583 * (doesn't matter which for this instruction).
11584 * @param cbInstr The instruction length in bytes.
11585 * @param fIoChecked Whether the access to the I/O port has been
11586 * checked or not. It's typically checked in the
11587 * HM scenario.
11588 */
11589VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11590 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
11591{
11592 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11593
11594 /*
11595 * State init.
11596 */
11597 PIEMCPU pIemCpu = &pVCpu->iem.s;
11598 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11599
11600 /*
11601 * Switch orgy for getting to the right handler.
11602 */
11603 VBOXSTRICTRC rcStrict;
11604 if (fRepPrefix)
11605 {
11606 switch (enmAddrMode)
11607 {
11608 case IEMMODE_16BIT:
11609 switch (cbValue)
11610 {
11611 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, fIoChecked); break;
11612 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, fIoChecked); break;
11613 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, fIoChecked); break;
11614 default:
11615 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11616 }
11617 break;
11618
11619 case IEMMODE_32BIT:
11620 switch (cbValue)
11621 {
11622 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, fIoChecked); break;
11623 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, fIoChecked); break;
11624 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, fIoChecked); break;
11625 default:
11626 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11627 }
11628 break;
11629
11630 case IEMMODE_64BIT:
11631 switch (cbValue)
11632 {
11633 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, fIoChecked); break;
11634 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, fIoChecked); break;
11635 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, fIoChecked); break;
11636 default:
11637 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11638 }
11639 break;
11640
11641 default:
11642 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11643 }
11644 }
11645 else
11646 {
11647 switch (enmAddrMode)
11648 {
11649 case IEMMODE_16BIT:
11650 switch (cbValue)
11651 {
11652 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, fIoChecked); break;
11653 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, fIoChecked); break;
11654 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, fIoChecked); break;
11655 default:
11656 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11657 }
11658 break;
11659
11660 case IEMMODE_32BIT:
11661 switch (cbValue)
11662 {
11663 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, fIoChecked); break;
11664 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, fIoChecked); break;
11665 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, fIoChecked); break;
11666 default:
11667 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11668 }
11669 break;
11670
11671 case IEMMODE_64BIT:
11672 switch (cbValue)
11673 {
11674 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, fIoChecked); break;
11675 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, fIoChecked); break;
11676 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, fIoChecked); break;
11677 default:
11678 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11679 }
11680 break;
11681
11682 default:
11683 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11684 }
11685 }
11686
11687 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11688}
11689
11690
11691/**
11692 * Interface for rawmode to execute an OUT (port write) instruction.
11693 *
11694 * @returns Strict VBox status code.
11695 * @param pVCpu The cross context virtual CPU structure.
11696 * @param cbInstr The instruction length in bytes.
11697 * @param u16Port The port to write to.
11698 * @param cbReg The register size.
11699 *
11700 * @remarks In ring-0 not all of the state needs to be synced in.
11701 */
11702VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
11703{
11704 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11705 Assert(cbReg <= 4 && cbReg != 3);
11706
11707 PIEMCPU pIemCpu = &pVCpu->iem.s;
11708 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11709 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
11710 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11711}
11712
11713
11714/**
11715 * Interface for rawmode to execute an IN (port read) instruction.
11716 *
11717 * @returns Strict VBox status code.
11718 * @param pVCpu The cross context virtual CPU structure.
11719 * @param cbInstr The instruction length in bytes.
11720 * @param u16Port The port to read.
11721 * @param cbReg The register size.
11722 */
11723VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
11724{
11725 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11726 Assert(cbReg <= 4 && cbReg != 3);
11727
11728 PIEMCPU pIemCpu = &pVCpu->iem.s;
11729 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11730 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
11731 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11732}
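
/*
 * A minimal caller sketch (hypothetical, compiled out): emulating pre-decoded
 * one-byte "out dx, al" and "in al, dx" instructions via the two interfaces above.
 * The port and instruction length are assumed to come from the trap handler.
 */
#if 0
static VBOXSTRICTRC iemExampleDecodedIo(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, cbInstr, u16Port, 1 /*cbReg*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = IEMExecDecodedIn(pVCpu, cbInstr, u16Port, 1 /*cbReg*/);
    return rcStrict;
}
#endif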
11733
11734
11735/**
11736 * Interface for HM and EM to write to a CRx register.
11737 *
11738 * @returns Strict VBox status code.
11739 * @param pVCpu The cross context virtual CPU structure.
11740 * @param cbInstr The instruction length in bytes.
11741 * @param iCrReg The control register number (destination).
11742 * @param iGReg The general purpose register number (source).
11743 *
11744 * @remarks In ring-0 not all of the state needs to be synced in.
11745 */
11746VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11747{
11748 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11749 Assert(iCrReg < 16);
11750 Assert(iGReg < 16);
11751
11752 PIEMCPU pIemCpu = &pVCpu->iem.s;
11753 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11754 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11755 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11756}
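
/*
 * A minimal caller sketch (hypothetical, compiled out): forwarding a decoded
 * "mov cr0, eax" to IEM.  The register indices use the usual x86 encoding
 * (CR0 = 0, eAX = 0); cbInstr is assumed to come from the VM-exit information.
 */
#if 0
static VBOXSTRICTRC iemExampleMovToCr0(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 0 /*iCrReg=CR0*/, 0 /*iGReg=xAX*/);
}
#endif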
11757
11758
11759/**
11760 * Interface for HM and EM to read from a CRx register.
11761 *
11762 * @returns Strict VBox status code.
11763 * @param pVCpu The cross context virtual CPU structure.
11764 * @param cbInstr The instruction length in bytes.
11765 * @param iGReg The general purpose register number (destination).
11766 * @param iCrReg The control register number (source).
11767 *
11768 * @remarks In ring-0 not all of the state needs to be synced in.
11769 */
11770VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11771{
11772 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11773 Assert(iCrReg < 16);
11774 Assert(iGReg < 16);
11775
11776 PIEMCPU pIemCpu = &pVCpu->iem.s;
11777 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11778 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11779 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11780}
11781
11782
11783/**
11784 * Interface for HM and EM to clear the CR0[TS] bit.
11785 *
11786 * @returns Strict VBox status code.
11787 * @param pVCpu The cross context virtual CPU structure.
11788 * @param cbInstr The instruction length in bytes.
11789 *
11790 * @remarks In ring-0 not all of the state needs to be synced in.
11791 */
11792VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11793{
11794 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11795
11796 PIEMCPU pIemCpu = &pVCpu->iem.s;
11797 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11798 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11799 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11800}
11801
11802
11803/**
11804 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11805 *
11806 * @returns Strict VBox status code.
11807 * @param pVCpu The cross context virtual CPU structure.
11808 * @param cbInstr The instruction length in bytes.
11809 * @param uValue The value to load into CR0.
11810 *
11811 * @remarks In ring-0 not all of the state needs to be synced in.
11812 */
11813VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11814{
11815 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11816
11817 PIEMCPU pIemCpu = &pVCpu->iem.s;
11818 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11819 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11820 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11821}
11822
11823
11824/**
11825 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11826 *
11827 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11828 *
11829 * @returns Strict VBox status code.
11830 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11831 * @param cbInstr The instruction length in bytes.
11832 * @remarks In ring-0 not all of the state needs to be synced in.
11833 * @thread EMT(pVCpu)
11834 */
11835VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11836{
11837 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11838
11839 PIEMCPU pIemCpu = &pVCpu->iem.s;
11840 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11841 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11842 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11843}
11844
11845#ifdef IN_RING3
11846
11847/**
11848 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11849 *
11850 * @returns Merge between @a rcStrict and what the commit operation returned.
11851 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11852 * @param rcStrict The status code returned by ring-0 or raw-mode.
11853 */
11854VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11855{
11856 PIEMCPU pIemCpu = &pVCpu->iem.s;
11857
11858 /*
11859 * Retrieve and reset the pending commit.
11860 */
11861 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11862 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11863 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11864
11865 /*
11866 * Must reset pass-up status code.
11867 */
11868 pIemCpu->rcPassUp = VINF_SUCCESS;
11869
11870 /*
11871 * Call the function. Currently using switch here instead of function
11872 * pointer table as a switch won't get skewed.
11873 */
11874 VBOXSTRICTRC rcStrictCommit;
11875 switch (enmFn)
11876 {
11877 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11878 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11879 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11880 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11881 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11882 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11883 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11884 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11885 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11886 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11887 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11888 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11889 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11890 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11891 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11892 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11893 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11894 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11895 default:
11896 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11897 }
11898
11899 /*
11900     * Merge status code (if any) with the incoming one.
11901 */
11902 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11903 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11904 return rcStrict;
11905 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11906 return rcStrictCommit;
11907
11908 /* Complicated. */
11909 if (RT_FAILURE(rcStrict))
11910 return rcStrict;
11911 if (RT_FAILURE(rcStrictCommit))
11912 return rcStrictCommit;
11913 if ( rcStrict >= VINF_EM_FIRST
11914 && rcStrict <= VINF_EM_LAST)
11915 {
11916 if ( rcStrictCommit >= VINF_EM_FIRST
11917 && rcStrictCommit <= VINF_EM_LAST)
11918 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11919
11920 /* This really shouldn't happen. Check PGM + handler code! */
11921 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11922 }
11923 /* This shouldn't really happen either, see IOM_SUCCESS. */
11924 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11925}
11926
11927#endif /* IN_RING3 */
11928