VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@60476

Last change on this file since 60476 was 60415, checked in by vboxsync, 9 years ago

IEM: Implemented main characteristics of 8086, 80186 and 80286.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 450.0 KB
1/* $Id: IEMAll.cpp 60415 2016-04-11 08:51:07Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 *
72 */
73
74/** @def IEM_VERIFICATION_MODE_MINIMAL
75 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
76 * context. */
77#if defined(DOXYGEN_RUNNING)
78# define IEM_VERIFICATION_MODE_MINIMAL
79#endif
80//#define IEM_LOG_MEMORY_WRITES
81#define IEM_IMPLEMENTS_TASKSWITCH
82
83
84/*********************************************************************************************************************************
85* Header Files *
86*********************************************************************************************************************************/
87#define LOG_GROUP LOG_GROUP_IEM
88#include <VBox/vmm/iem.h>
89#include <VBox/vmm/cpum.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <internal/pgm.h>
93#include <VBox/vmm/iom.h>
94#include <VBox/vmm/em.h>
95#include <VBox/vmm/hm.h>
96#include <VBox/vmm/tm.h>
97#include <VBox/vmm/dbgf.h>
98#include <VBox/vmm/dbgftrace.h>
99#ifdef VBOX_WITH_RAW_MODE_NOT_R0
100# include <VBox/vmm/patm.h>
101# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
102# include <VBox/vmm/csam.h>
103# endif
104#endif
105#include "IEMInternal.h"
106#ifdef IEM_VERIFICATION_MODE_FULL
107# include <VBox/vmm/rem.h>
108# include <VBox/vmm/mm.h>
109#endif
110#include <VBox/vmm/vm.h>
111#include <VBox/log.h>
112#include <VBox/err.h>
113#include <VBox/param.h>
114#include <VBox/dis.h>
115#include <VBox/disopcode.h>
116#include <iprt/assert.h>
117#include <iprt/string.h>
118#include <iprt/x86.h>
119
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/** @typedef PFNIEMOP
126 * Pointer to an opcode decoder function.
127 */
128
129/** @def FNIEMOP_DEF
130 * Define an opcode decoder function.
131 *
132 * We're using macros for this so that adding and removing parameters as well as
133 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
134 *
135 * @param a_Name The function name.
136 */
137
138
139#if defined(__GNUC__) && defined(RT_ARCH_X86)
140typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
141# define FNIEMOP_DEF(a_Name) \
142 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
143# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
145# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
147
148#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
149typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
150# define FNIEMOP_DEF(a_Name) \
151 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
152# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
156
157#elif defined(__GNUC__)
158typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
159# define FNIEMOP_DEF(a_Name) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
161# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
163# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
165
166#else
167typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#endif
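/*
 * Illustrative sketch, added for clarity (not part of the original file): the
 * FNIEMOP_DEF* macros are used like this to define decoder functions with the
 * calling convention selected above.  The function names below are
 * hypothetical; the real decoder bodies live in the instruction
 * implementation includes.
 *
 * @code
 *  FNIEMOP_DEF(iemOp_example)                          // no extra arguments
 *  {
 *      return FNIEMOP_CALL_1(iemOp_example_worker, 0); // see FNIEMOP_CALL_1 below
 *  }
 *
 *  FNIEMOP_DEF_1(iemOp_example_worker, uint8_t, bRm)   // one extra argument
 *  {
 *      NOREF(bRm);
 *      return VINF_SUCCESS;                            // stub body
 *  }
 * @endcode
 */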
176
177
178/**
179 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
180 */
181typedef union IEMSELDESC
182{
183 /** The legacy view. */
184 X86DESC Legacy;
185 /** The long mode view. */
186 X86DESC64 Long;
187} IEMSELDESC;
188/** Pointer to a selector descriptor table entry. */
189typedef IEMSELDESC *PIEMSELDESC;
190
191
192/*********************************************************************************************************************************
193* Defined Constants And Macros *
194*********************************************************************************************************************************/
195/** Temporary hack to disable the double execution. Will be removed in favor
196 * of a dedicated execution mode in EM. */
197//#define IEM_VERIFICATION_MODE_NO_REM
198
199/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
200 * due to GCC lacking knowledge about the value range of a switch. */
201#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
202
203/**
204 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
205 * occasion.
206 */
207#ifdef LOG_ENABLED
208# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
209 do { \
210 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
212 } while (0)
213#else
214# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
215 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
216#endif
217
218/**
219 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
220 * occasion using the supplied logger statement.
221 *
222 * @param a_LoggerArgs What to log on failure.
223 */
224#ifdef LOG_ENABLED
225# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
226 do { \
227 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
228 /*LogFunc(a_LoggerArgs);*/ \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
230 } while (0)
231#else
232# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
233 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
234#endif
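/*
 * Usage sketch (illustrative addition, not from the original source): an
 * instruction helper that hits an unimplemented corner case bails out like
 * this.  The condition and selector variable are hypothetical.
 *
 * @code
 *  if (fUnsupportedCombination)    // hypothetical condition
 *      IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("uSel=%#x", uSel));
 * @endcode
 */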
235
236/**
237 * Call an opcode decoder function.
238 *
239 * We're using macros for this so that adding and removing parameters can be
240 * done as we please. See FNIEMOP_DEF.
241 */
242#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
243
244/**
245 * Call a common opcode decoder function taking one extra argument.
246 *
247 * We're using macros for this so that adding and removing parameters can be
248 * done as we please. See FNIEMOP_DEF_1.
249 */
250#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
251
252/**
253 * Call a common opcode decoder function taking two extra arguments.
254 *
255 * We're using macros for this so that adding and removing parameters can be
256 * done as we please. See FNIEMOP_DEF_2.
257 */
258#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
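/*
 * Illustrative sketch (added for clarity): how the FNIEMOP_CALL* macros tie
 * the decoder together.  This is a simplification of the one-byte opcode
 * dispatch done elsewhere in this file; g_apfnOneByteMap and
 * IEM_OPCODE_GET_NEXT_U8 are declared/defined further down.
 *
 * @code
 *  uint8_t b;
 *  IEM_OPCODE_GET_NEXT_U8(&b);                     // fetch the first opcode byte
 *  rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);   // dispatch to its decoder
 * @endcode
 */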
259
260/**
261 * Check if we're currently executing in real or virtual 8086 mode.
262 *
263 * @returns @c true if it is, @c false if not.
264 * @param a_pIemCpu The IEM state of the current CPU.
265 */
266#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
267
268/**
269 * Check if we're currently executing in virtual 8086 mode.
270 *
271 * @returns @c true if it is, @c false if not.
272 * @param a_pIemCpu The IEM state of the current CPU.
273 */
274#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
275
276/**
277 * Check if we're currently executing in long mode.
278 *
279 * @returns @c true if it is, @c false if not.
280 * @param a_pIemCpu The IEM state of the current CPU.
281 */
282#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
283
284/**
285 * Check if we're currently executing in real mode.
286 *
287 * @returns @c true if it is, @c false if not.
288 * @param a_pIemCpu The IEM state of the current CPU.
289 */
290#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
291
292/**
293 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
294 * @returns PCCPUMFEATURES
295 * @param a_pIemCpu The IEM state of the current CPU.
296 */
297#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
298
299/**
300 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
301 * @returns PCCPUMFEATURES
302 * @param a_pIemCpu The IEM state of the current CPU.
303 */
304#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
305
306/**
307 * Evaluates to true if we're presenting an Intel CPU to the guest.
308 */
309#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
310
311/**
312 * Evaluates to true if we're presenting an AMD CPU to the guest.
313 */
314#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
315
316/**
317 * Check if the address is canonical.
318 */
319#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
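/*
 * Usage sketch (taken from the opcode prefetch code further down in this
 * file): in 64-bit mode a non-canonical RIP is rejected with \#GP(0) before
 * any memory access is attempted.
 *
 * @code
 *  if (!IEM_IS_CANONICAL(GCPtrPC))
 *      return iemRaiseGeneralProtectionFault0(pIemCpu);
 * @endcode
 */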
320
321
322/*********************************************************************************************************************************
323* Global Variables *
324*********************************************************************************************************************************/
325extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
326
327
328/** Function table for the ADD instruction. */
329IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
330{
331 iemAImpl_add_u8, iemAImpl_add_u8_locked,
332 iemAImpl_add_u16, iemAImpl_add_u16_locked,
333 iemAImpl_add_u32, iemAImpl_add_u32_locked,
334 iemAImpl_add_u64, iemAImpl_add_u64_locked
335};
336
337/** Function table for the ADC instruction. */
338IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
339{
340 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
341 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
342 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
343 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
344};
345
346/** Function table for the SUB instruction. */
347IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
348{
349 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
350 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
351 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
352 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
353};
354
355/** Function table for the SBB instruction. */
356IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
357{
358 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
359 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
360 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
361 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
362};
363
364/** Function table for the OR instruction. */
365IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
366{
367 iemAImpl_or_u8, iemAImpl_or_u8_locked,
368 iemAImpl_or_u16, iemAImpl_or_u16_locked,
369 iemAImpl_or_u32, iemAImpl_or_u32_locked,
370 iemAImpl_or_u64, iemAImpl_or_u64_locked
371};
372
373/** Function table for the XOR instruction. */
374IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
375{
376 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
377 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
378 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
379 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
380};
381
382/** Function table for the AND instruction. */
383IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
384{
385 iemAImpl_and_u8, iemAImpl_and_u8_locked,
386 iemAImpl_and_u16, iemAImpl_and_u16_locked,
387 iemAImpl_and_u32, iemAImpl_and_u32_locked,
388 iemAImpl_and_u64, iemAImpl_and_u64_locked
389};
390
391/** Function table for the CMP instruction.
392 * @remarks Making operand order ASSUMPTIONS.
393 */
394IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
395{
396 iemAImpl_cmp_u8, NULL,
397 iemAImpl_cmp_u16, NULL,
398 iemAImpl_cmp_u32, NULL,
399 iemAImpl_cmp_u64, NULL
400};
401
402/** Function table for the TEST instruction.
403 * @remarks Making operand order ASSUMPTIONS.
404 */
405IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
406{
407 iemAImpl_test_u8, NULL,
408 iemAImpl_test_u16, NULL,
409 iemAImpl_test_u32, NULL,
410 iemAImpl_test_u64, NULL
411};
412
413/** Function table for the BT instruction. */
414IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
415{
416 NULL, NULL,
417 iemAImpl_bt_u16, NULL,
418 iemAImpl_bt_u32, NULL,
419 iemAImpl_bt_u64, NULL
420};
421
422/** Function table for the BTC instruction. */
423IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
424{
425 NULL, NULL,
426 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
427 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
428 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
429};
430
431/** Function table for the BTR instruction. */
432IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
433{
434 NULL, NULL,
435 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
436 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
437 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
438};
439
440/** Function table for the BTS instruction. */
441IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
442{
443 NULL, NULL,
444 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
445 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
446 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
447};
448
449/** Function table for the BSF instruction. */
450IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
451{
452 NULL, NULL,
453 iemAImpl_bsf_u16, NULL,
454 iemAImpl_bsf_u32, NULL,
455 iemAImpl_bsf_u64, NULL
456};
457
458/** Function table for the BSR instruction. */
459IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
460{
461 NULL, NULL,
462 iemAImpl_bsr_u16, NULL,
463 iemAImpl_bsr_u32, NULL,
464 iemAImpl_bsr_u64, NULL
465};
466
467/** Function table for the IMUL instruction. */
468IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
469{
470 NULL, NULL,
471 iemAImpl_imul_two_u16, NULL,
472 iemAImpl_imul_two_u32, NULL,
473 iemAImpl_imul_two_u64, NULL
474};
475
476/** Group 1 /r lookup table. */
477IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
478{
479 &g_iemAImpl_add,
480 &g_iemAImpl_or,
481 &g_iemAImpl_adc,
482 &g_iemAImpl_sbb,
483 &g_iemAImpl_and,
484 &g_iemAImpl_sub,
485 &g_iemAImpl_xor,
486 &g_iemAImpl_cmp
487};
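/*
 * Illustrative sketch (not part of the original file): how the binary-op
 * tables above are meant to be indexed.  A group 1 instruction (0x80..0x83)
 * uses the ModR/M reg field to select the operation, and the effective
 * operand size plus any LOCK prefix select the worker within the table.  The
 * IEMOPBINSIZES member names and the worker signature used below are
 * assumptions based on the initializer layout above.
 *
 * @code
 *  PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];   // reg field selects ADD..CMP
 *  // e.g. 16-bit operand size, honouring a LOCK prefix when a locked worker exists:
 *  if (fLockPrefix && pImpl->pfnLockedU16)
 *      pImpl->pfnLockedU16(pu16Dst, u16Src, pEFlags);
 *  else
 *      pImpl->pfnNormalU16(pu16Dst, u16Src, pEFlags);
 * @endcode
 */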
488
489/** Function table for the INC instruction. */
490IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
491{
492 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
493 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
494 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
495 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
496};
497
498/** Function table for the DEC instruction. */
499IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
500{
501 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
502 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
503 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
504 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
505};
506
507/** Function table for the NEG instruction. */
508IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
509{
510 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
511 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
512 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
513 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
514};
515
516/** Function table for the NOT instruction. */
517IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
518{
519 iemAImpl_not_u8, iemAImpl_not_u8_locked,
520 iemAImpl_not_u16, iemAImpl_not_u16_locked,
521 iemAImpl_not_u32, iemAImpl_not_u32_locked,
522 iemAImpl_not_u64, iemAImpl_not_u64_locked
523};
524
525
526/** Function table for the ROL instruction. */
527IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
528{
529 iemAImpl_rol_u8,
530 iemAImpl_rol_u16,
531 iemAImpl_rol_u32,
532 iemAImpl_rol_u64
533};
534
535/** Function table for the ROR instruction. */
536IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
537{
538 iemAImpl_ror_u8,
539 iemAImpl_ror_u16,
540 iemAImpl_ror_u32,
541 iemAImpl_ror_u64
542};
543
544/** Function table for the RCL instruction. */
545IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
546{
547 iemAImpl_rcl_u8,
548 iemAImpl_rcl_u16,
549 iemAImpl_rcl_u32,
550 iemAImpl_rcl_u64
551};
552
553/** Function table for the RCR instruction. */
554IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
555{
556 iemAImpl_rcr_u8,
557 iemAImpl_rcr_u16,
558 iemAImpl_rcr_u32,
559 iemAImpl_rcr_u64
560};
561
562/** Function table for the SHL instruction. */
563IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
564{
565 iemAImpl_shl_u8,
566 iemAImpl_shl_u16,
567 iemAImpl_shl_u32,
568 iemAImpl_shl_u64
569};
570
571/** Function table for the SHR instruction. */
572IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
573{
574 iemAImpl_shr_u8,
575 iemAImpl_shr_u16,
576 iemAImpl_shr_u32,
577 iemAImpl_shr_u64
578};
579
580/** Function table for the SAR instruction. */
581IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
582{
583 iemAImpl_sar_u8,
584 iemAImpl_sar_u16,
585 iemAImpl_sar_u32,
586 iemAImpl_sar_u64
587};
588
589
590/** Function table for the MUL instruction. */
591IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
592{
593 iemAImpl_mul_u8,
594 iemAImpl_mul_u16,
595 iemAImpl_mul_u32,
596 iemAImpl_mul_u64
597};
598
599/** Function table for the IMUL instruction working implicitly on rAX. */
600IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
601{
602 iemAImpl_imul_u8,
603 iemAImpl_imul_u16,
604 iemAImpl_imul_u32,
605 iemAImpl_imul_u64
606};
607
608/** Function table for the DIV instruction. */
609IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
610{
611 iemAImpl_div_u8,
612 iemAImpl_div_u16,
613 iemAImpl_div_u32,
614 iemAImpl_div_u64
615};
616
617/** Function table for the IDIV instruction. */
618IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
619{
620 iemAImpl_idiv_u8,
621 iemAImpl_idiv_u16,
622 iemAImpl_idiv_u32,
623 iemAImpl_idiv_u64
624};
625
626/** Function table for the SHLD instruction */
627IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
628{
629 iemAImpl_shld_u16,
630 iemAImpl_shld_u32,
631 iemAImpl_shld_u64,
632};
633
634/** Function table for the SHRD instruction */
635IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
636{
637 iemAImpl_shrd_u16,
638 iemAImpl_shrd_u32,
639 iemAImpl_shrd_u64,
640};
641
642
643/** Function table for the PUNPCKLBW instruction */
644IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
645/** Function table for the PUNPCKLWD instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
647/** Function table for the PUNPCKLDQ instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
649/** Function table for the PUNPCKLQDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
651
652/** Function table for the PUNPCKHBW instruction */
653IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
654/** Function table for the PUNPCKHWD instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
656/** Function table for the PUNPCKHDQ instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
658/** Function table for the PUNPCKHQDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
660
661/** Function table for the PXOR instruction */
662IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
663/** Function table for the PCMPEQB instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
665/** Function table for the PCMPEQW instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
667/** Function table for the PCMPEQD instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
669
670
671#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
672/** What IEM just wrote. */
673uint8_t g_abIemWrote[256];
674/** How much IEM just wrote. */
675size_t g_cbIemWrote;
676#endif
677
678
679/*********************************************************************************************************************************
680* Internal Functions *
681*********************************************************************************************************************************/
682IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
683IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
687IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
689IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
692IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
694IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
695IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
698IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
699IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
700IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
701IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
709IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
710IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
713IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
714IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
715
716#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
717IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
718#endif
719IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
720IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
721
722
723
724/**
725 * Sets the pass up status.
726 *
727 * @returns VINF_SUCCESS.
728 * @param pIemCpu The per CPU IEM state of the calling thread.
729 * @param rcPassUp The pass up status. Must be informational.
730 * VINF_SUCCESS is not allowed.
731 */
732IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
733{
734 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
735
736 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
737 if (rcOldPassUp == VINF_SUCCESS)
738 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
739 /* If both are EM scheduling codes, use EM priority rules. */
740 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
741 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
742 {
743 if (rcPassUp < rcOldPassUp)
744 {
745 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
747 }
748 else
749 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
750 }
751 /* Override EM scheduling with specific status code. */
752 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
753 {
754 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
755 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
756 }
757 /* Don't override specific status code, first come first served. */
758 else
759 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
760 return VINF_SUCCESS;
761}
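/*
 * Usage sketch (illustrative addition): a caller typically merges an
 * informational status from PGM/IOM into the pass-up status and then carries
 * on as if the operation succeeded, exactly as the opcode prefetch code below
 * does.
 *
 * @code
 *  if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *      rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);   // records it, yields VINF_SUCCESS
 * @endcode
 */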
762
763
764/**
765 * Calculates the CPU mode.
766 *
767 * This is mainly for updating IEMCPU::enmCpuMode.
768 *
769 * @returns CPU mode.
770 * @param pCtx The register context for the CPU.
771 */
772DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
773{
774 if (CPUMIsGuestIn64BitCodeEx(pCtx))
775 return IEMMODE_64BIT;
776 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
777 return IEMMODE_32BIT;
778 return IEMMODE_16BIT;
779}
780
781
782/**
783 * Initializes the execution state.
784 *
785 * @param pIemCpu The per CPU IEM state.
786 * @param fBypassHandlers Whether to bypass access handlers.
787 *
788 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
789 * side-effects in strict builds.
790 */
791DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
792{
793 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
794 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
795
796 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
797 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
798
799#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
800 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
801 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
808#endif
809
810#ifdef VBOX_WITH_RAW_MODE_NOT_R0
811 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
812#endif
813 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
814 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
815#ifdef VBOX_STRICT
816 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
817 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
818 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
819 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
820 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
821 pIemCpu->uRexReg = 127;
822 pIemCpu->uRexB = 127;
823 pIemCpu->uRexIndex = 127;
824 pIemCpu->iEffSeg = 127;
825 pIemCpu->offOpcode = 127;
826 pIemCpu->cbOpcode = 127;
827#endif
828
829 pIemCpu->cActiveMappings = 0;
830 pIemCpu->iNextMapping = 0;
831 pIemCpu->rcPassUp = VINF_SUCCESS;
832 pIemCpu->fBypassHandlers = fBypassHandlers;
833#ifdef VBOX_WITH_RAW_MODE_NOT_R0
834 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
835 && pCtx->cs.u64Base == 0
836 && pCtx->cs.u32Limit == UINT32_MAX
837 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
838 if (!pIemCpu->fInPatchCode)
839 CPUMRawLeave(pVCpu, VINF_SUCCESS);
840#endif
841
842#ifdef IEM_VERIFICATION_MODE_FULL
843 pIemCpu->fNoRemSavedByExec = pIemCpu->fNoRem;
844 pIemCpu->fNoRem = true;
845#endif
846}
847
848
849/**
850 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
851 *
852 * @param pIemCpu The per CPU IEM state.
853 */
854DECLINLINE(void) iemUninitExec(PIEMCPU pIemCpu)
855{
856#ifdef IEM_VERIFICATION_MODE_FULL
857 pIemCpu->fNoRem = pIemCpu->fNoRemSavedByExec;
858#endif
859#ifdef VBOX_STRICT
860 pIemCpu->cbOpcode = 0;
861#else
862 NOREF(pIemCpu);
863#endif
864}
865
866
867/**
868 * Initializes the decoder state.
869 *
870 * @param pIemCpu The per CPU IEM state.
871 * @param fBypassHandlers Whether to bypass access handlers.
872 */
873DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
874{
875 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
876 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
877
878 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
879 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
880
881#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
882 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
883 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
885 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
886 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
887 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
888 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
889 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
890#endif
891
892#ifdef VBOX_WITH_RAW_MODE_NOT_R0
893 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
894#endif
895 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
896#ifdef IEM_VERIFICATION_MODE_FULL
897 if (pIemCpu->uInjectCpl != UINT8_MAX)
898 pIemCpu->uCpl = pIemCpu->uInjectCpl;
899#endif
900 IEMMODE enmMode = iemCalcCpuMode(pCtx);
901 pIemCpu->enmCpuMode = enmMode;
902 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
903 pIemCpu->enmEffAddrMode = enmMode;
904 if (enmMode != IEMMODE_64BIT)
905 {
906 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
907 pIemCpu->enmEffOpSize = enmMode;
908 }
909 else
910 {
911 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
912 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
913 }
914 pIemCpu->fPrefixes = 0;
915 pIemCpu->uRexReg = 0;
916 pIemCpu->uRexB = 0;
917 pIemCpu->uRexIndex = 0;
918 pIemCpu->iEffSeg = X86_SREG_DS;
919 pIemCpu->offOpcode = 0;
920 pIemCpu->cbOpcode = 0;
921 pIemCpu->cActiveMappings = 0;
922 pIemCpu->iNextMapping = 0;
923 pIemCpu->rcPassUp = VINF_SUCCESS;
924 pIemCpu->fBypassHandlers = fBypassHandlers;
925#ifdef VBOX_WITH_RAW_MODE_NOT_R0
926 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
927 && pCtx->cs.u64Base == 0
928 && pCtx->cs.u32Limit == UINT32_MAX
929 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
930 if (!pIemCpu->fInPatchCode)
931 CPUMRawLeave(pVCpu, VINF_SUCCESS);
932#endif
933
934#ifdef DBGFTRACE_ENABLED
935 switch (enmMode)
936 {
937 case IEMMODE_64BIT:
938 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
939 break;
940 case IEMMODE_32BIT:
941 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
942 break;
943 case IEMMODE_16BIT:
944 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
945 break;
946 }
947#endif
948}
949
950
951/**
952 * Prefetches opcodes the first time execution is started.
953 *
954 * @returns Strict VBox status code.
955 * @param pIemCpu The IEM state.
956 * @param fBypassHandlers Whether to bypass access handlers.
957 */
958IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
959{
960#ifdef IEM_VERIFICATION_MODE_FULL
961 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
962#endif
963 iemInitDecoder(pIemCpu, fBypassHandlers);
964
965 /*
966 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
967 *
968 * First translate CS:rIP to a physical address.
969 */
970 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
971 uint32_t cbToTryRead;
972 RTGCPTR GCPtrPC;
973 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
974 {
975 cbToTryRead = PAGE_SIZE;
976 GCPtrPC = pCtx->rip;
977 if (!IEM_IS_CANONICAL(GCPtrPC))
978 return iemRaiseGeneralProtectionFault0(pIemCpu);
979 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
980 }
981 else
982 {
983 uint32_t GCPtrPC32 = pCtx->eip;
984 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
985 if (GCPtrPC32 > pCtx->cs.u32Limit)
986 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
987 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
988 if (!cbToTryRead) /* overflowed */
989 {
990 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
991 cbToTryRead = UINT32_MAX;
992 }
993 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
994 Assert(GCPtrPC <= UINT32_MAX);
995 }
996
997#ifdef VBOX_WITH_RAW_MODE_NOT_R0
998 /* Allow interpretation of patch manager code blocks since they can for
999 instance throw #PFs for perfectly good reasons. */
1000 if (pIemCpu->fInPatchCode)
1001 {
1002 size_t cbRead = 0;
1003 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
1004 AssertRCReturn(rc, rc);
1005 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1006 return VINF_SUCCESS;
1007 }
1008#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1009
1010 RTGCPHYS GCPhys;
1011 uint64_t fFlags;
1012 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
1013 if (RT_FAILURE(rc))
1014 {
1015 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1016 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1017 }
1018 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1019 {
1020 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1021 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1022 }
1023 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1024 {
1025 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1026 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1027 }
1028 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1029 /** @todo Check reserved bits and such stuff. PGM is better at doing
1030 * that, so do it when implementing the guest virtual address
1031 * TLB... */
1032
1033#ifdef IEM_VERIFICATION_MODE_FULL
1034 /*
1035 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1036 * instruction.
1037 */
1038 /** @todo optimize this differently by not using PGMPhysRead. */
1039 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1040 pIemCpu->GCPhysOpcodes = GCPhys;
1041 if ( offPrevOpcodes < cbOldOpcodes
1042 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1043 {
1044 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1045 Assert(cbNew <= RT_ELEMENTS(pIemCpu->abOpcode));
1046 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1047 pIemCpu->cbOpcode = cbNew;
1048 return VINF_SUCCESS;
1049 }
1050#endif
1051
1052 /*
1053 * Read the bytes at this address.
1054 */
1055 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1056#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1057 size_t cbActual;
1058 if ( PATMIsEnabled(pVM)
1059 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1060 {
1061 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1062 Assert(cbActual > 0);
1063 pIemCpu->cbOpcode = (uint8_t)cbActual;
1064 }
1065 else
1066#endif
1067 {
1068 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1069 if (cbToTryRead > cbLeftOnPage)
1070 cbToTryRead = cbLeftOnPage;
1071 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1072 cbToTryRead = sizeof(pIemCpu->abOpcode);
1073
1074 if (!pIemCpu->fBypassHandlers)
1075 {
1076 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1077 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1078 { /* likely */ }
1079 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1080 {
1081 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1082 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1083 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1084 }
1085 else
1086 {
1087 Log((RT_SUCCESS(rcStrict)
1088 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1089 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1090 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1091 return rcStrict;
1092 }
1093 }
1094 else
1095 {
1096 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1097 if (RT_SUCCESS(rc))
1098 { /* likely */ }
1099 else
1100 {
1101 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1102 GCPtrPC, GCPhys, cbToTryRead, rc));
1103 return rc;
1104 }
1105 }
1106 pIemCpu->cbOpcode = cbToTryRead;
1107 }
1108
1109 return VINF_SUCCESS;
1110}
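/*
 * Worked example (illustrative addition): with a 32-bit code segment whose
 * base is 0x10000 and EIP = 0xFFA, the code above prefetches from linear
 * address 0x10FFA.  Only 6 bytes remain on that 4 KiB page (0x1000 - 0xFFA),
 * so cbToTryRead is clamped to 6, and further to sizeof(abOpcode) if that is
 * smaller; any remaining bytes of a long instruction are fetched later by
 * iemOpcodeFetchMoreBytes().
 */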
1111
1112
1113/**
1114 * Try to fetch at least @a cbMin more opcode bytes, raising the appropriate
1115 * exception if it fails.
1116 *
1117 * @returns Strict VBox status code.
1118 * @param pIemCpu The IEM state.
1119 * @param cbMin The minimum number of bytes relative to offOpcode
1120 * that must be read.
1121 */
1122IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1123{
1124 /*
1125 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1126 *
1127 * First translate CS:rIP to a physical address.
1128 */
1129 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1130 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1131 uint32_t cbToTryRead;
1132 RTGCPTR GCPtrNext;
1133 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1134 {
1135 cbToTryRead = PAGE_SIZE;
1136 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1137 if (!IEM_IS_CANONICAL(GCPtrNext))
1138 return iemRaiseGeneralProtectionFault0(pIemCpu);
1139 }
1140 else
1141 {
1142 uint32_t GCPtrNext32 = pCtx->eip;
1143 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1144 GCPtrNext32 += pIemCpu->cbOpcode;
1145 if (GCPtrNext32 > pCtx->cs.u32Limit)
1146 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1147 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1148 if (!cbToTryRead) /* overflowed */
1149 {
1150 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1151 cbToTryRead = UINT32_MAX;
1152 /** @todo check out wrapping around the code segment. */
1153 }
1154 if (cbToTryRead < cbMin - cbLeft)
1155 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1156 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1157 }
1158
1159 /* Only read up to the end of the page, and make sure we don't read more
1160 than the opcode buffer can hold. */
1161 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1162 if (cbToTryRead > cbLeftOnPage)
1163 cbToTryRead = cbLeftOnPage;
1164 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1165 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1166/** @todo r=bird: Convert assertion into undefined opcode exception? */
1167 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1168
1169#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1170 /* Allow interpretation of patch manager code blocks since they can for
1171 instance throw #PFs for perfectly good reasons. */
1172 if (pIemCpu->fInPatchCode)
1173 {
1174 size_t cbRead = 0;
1175 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1176 AssertRCReturn(rc, rc);
1177 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1178 return VINF_SUCCESS;
1179 }
1180#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1181
1182 RTGCPHYS GCPhys;
1183 uint64_t fFlags;
1184 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1185 if (RT_FAILURE(rc))
1186 {
1187 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1188 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1189 }
1190 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1191 {
1192 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1193 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1194 }
1195 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1196 {
1197 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1198 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1199 }
1200 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1201 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1202 /** @todo Check reserved bits and such stuff. PGM is better at doing
1203 * that, so do it when implementing the guest virtual address
1204 * TLB... */
1205
1206 /*
1207 * Read the bytes at this address.
1208 *
1209 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1210 * and since PATM should only patch the start of an instruction there
1211 * should be no need to check again here.
1212 */
1213 if (!pIemCpu->fBypassHandlers)
1214 {
1215 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1216 cbToTryRead, PGMACCESSORIGIN_IEM);
1217 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1218 { /* likely */ }
1219 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1220 {
1221 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1222 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1223 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1224 }
1225 else
1226 {
1227 Log((RT_SUCCESS(rcStrict)
1228 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1229 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1230 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1231 return rcStrict;
1232 }
1233 }
1234 else
1235 {
1236 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1237 if (RT_SUCCESS(rc))
1238 { /* likely */ }
1239 else
1240 {
1241 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1242 return rc;
1243 }
1244 }
1245 pIemCpu->cbOpcode += cbToTryRead;
1246 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1247
1248 return VINF_SUCCESS;
1249}
1250
1251
1252/**
1253 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1254 *
1255 * @returns Strict VBox status code.
1256 * @param pIemCpu The IEM state.
1257 * @param pb Where to return the opcode byte.
1258 */
1259DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1260{
1261 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1262 if (rcStrict == VINF_SUCCESS)
1263 {
1264 uint8_t offOpcode = pIemCpu->offOpcode;
1265 *pb = pIemCpu->abOpcode[offOpcode];
1266 pIemCpu->offOpcode = offOpcode + 1;
1267 }
1268 else
1269 *pb = 0;
1270 return rcStrict;
1271}
1272
1273
1274/**
1275 * Fetches the next opcode byte.
1276 *
1277 * @returns Strict VBox status code.
1278 * @param pIemCpu The IEM state.
1279 * @param pu8 Where to return the opcode byte.
1280 */
1281DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1282{
1283 uint8_t const offOpcode = pIemCpu->offOpcode;
1284 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1285 {
1286 *pu8 = pIemCpu->abOpcode[offOpcode];
1287 pIemCpu->offOpcode = offOpcode + 1;
1288 return VINF_SUCCESS;
1289 }
1290 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1291}
1292
1293
1294/**
1295 * Fetches the next opcode byte, returns automatically on failure.
1296 *
1297 * @param a_pu8 Where to return the opcode byte.
1298 * @remark Implicitly references pIemCpu.
1299 */
1300#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1301 do \
1302 { \
1303 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1304 if (rcStrict2 != VINF_SUCCESS) \
1305 return rcStrict2; \
1306 } while (0)
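/*
 * Usage sketch (illustrative): inside an FNIEMOP_DEF style decoder this macro
 * reads one buffered opcode byte and makes the enclosing function return the
 * strict status code should fetching more bytes fail.  The variable name is
 * arbitrary.
 *
 * @code
 *  uint8_t bOpcode2;
 *  IEM_OPCODE_GET_NEXT_U8(&bOpcode2);   // on failure: returns rcStrict2 from the decoder
 * @endcode
 */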
1307
1308
1309/**
1310 * Fetches the next signed byte from the opcode stream.
1311 *
1312 * @returns Strict VBox status code.
1313 * @param pIemCpu The IEM state.
1314 * @param pi8 Where to return the signed byte.
1315 */
1316DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1317{
1318 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1319}
1320
1321
1322/**
1323 * Fetches the next signed byte from the opcode stream, returning automatically
1324 * on failure.
1325 *
1326 * @param a_pi8 Where to return the signed byte.
1327 * @remark Implicitly references pIemCpu.
1328 */
1329#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1330 do \
1331 { \
1332 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1333 if (rcStrict2 != VINF_SUCCESS) \
1334 return rcStrict2; \
1335 } while (0)
1336
1337
1338/**
1339 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1340 *
1341 * @returns Strict VBox status code.
1342 * @param pIemCpu The IEM state.
1343 * @param pu16 Where to return the opcode word.
1344 */
1345DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1346{
1347 uint8_t u8;
1348 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1349 if (rcStrict == VINF_SUCCESS)
1350 *pu16 = (int8_t)u8;
1351 return rcStrict;
1352}
1353
1354
1355/**
1356 * Fetches the next signed byte from the opcode stream, extending it to
1357 * unsigned 16-bit.
1358 *
1359 * @returns Strict VBox status code.
1360 * @param pIemCpu The IEM state.
1361 * @param pu16 Where to return the unsigned word.
1362 */
1363DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1364{
1365 uint8_t const offOpcode = pIemCpu->offOpcode;
1366 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1367 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1368
1369 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1370 pIemCpu->offOpcode = offOpcode + 1;
1371 return VINF_SUCCESS;
1372}
1373
1374
1375/**
1376 * Fetches the next signed byte from the opcode stream, sign-extending it to
1377 * a word and returning automatically on failure.
1378 *
1379 * @param a_pu16 Where to return the word.
1380 * @remark Implicitly references pIemCpu.
1381 */
1382#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1383 do \
1384 { \
1385 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1386 if (rcStrict2 != VINF_SUCCESS) \
1387 return rcStrict2; \
1388 } while (0)
1389
1390
1391/**
1392 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1393 *
1394 * @returns Strict VBox status code.
1395 * @param pIemCpu The IEM state.
1396 * @param pu32 Where to return the opcode dword.
1397 */
1398DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1399{
1400 uint8_t u8;
1401 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1402 if (rcStrict == VINF_SUCCESS)
1403 *pu32 = (int8_t)u8;
1404 return rcStrict;
1405}
1406
1407
1408/**
1409 * Fetches the next signed byte from the opcode stream, extending it to
1410 * unsigned 32-bit.
1411 *
1412 * @returns Strict VBox status code.
1413 * @param pIemCpu The IEM state.
1414 * @param pu32 Where to return the unsigned dword.
1415 */
1416DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1417{
1418 uint8_t const offOpcode = pIemCpu->offOpcode;
1419 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1420 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1421
1422 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1423 pIemCpu->offOpcode = offOpcode + 1;
1424 return VINF_SUCCESS;
1425}
1426
1427
1428/**
1429 * Fetches the next signed byte from the opcode stream, sign-extending it to
1430 * a double word and returning automatically on failure.
1431 *
1432 * @param a_pu32 Where to return the double word.
1433 * @remark Implicitly references pIemCpu.
1434 */
1435#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1436 do \
1437 { \
1438 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1439 if (rcStrict2 != VINF_SUCCESS) \
1440 return rcStrict2; \
1441 } while (0)
1442
1443
1444/**
1445 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1446 *
1447 * @returns Strict VBox status code.
1448 * @param pIemCpu The IEM state.
1449 * @param pu64 Where to return the opcode qword.
1450 */
1451DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1452{
1453 uint8_t u8;
1454 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1455 if (rcStrict == VINF_SUCCESS)
1456 *pu64 = (int8_t)u8;
1457 return rcStrict;
1458}
1459
1460
1461/**
1462 * Fetches the next signed byte from the opcode stream, extending it to
1463 * unsigned 64-bit.
1464 *
1465 * @returns Strict VBox status code.
1466 * @param pIemCpu The IEM state.
1467 * @param pu64 Where to return the unsigned qword.
1468 */
1469DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1470{
1471 uint8_t const offOpcode = pIemCpu->offOpcode;
1472 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1473 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1474
1475 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1476 pIemCpu->offOpcode = offOpcode + 1;
1477 return VINF_SUCCESS;
1478}
1479
1480
1481/**
1482 * Fetches the next signed byte from the opcode stream, sign-extending it to
1483 * a quad word and returning automatically on failure.
1484 *
1485 * @param a_pu64 Where to return the quad word.
1486 * @remark Implicitly references pIemCpu.
1487 */
1488#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1489 do \
1490 { \
1491 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1492 if (rcStrict2 != VINF_SUCCESS) \
1493 return rcStrict2; \
1494 } while (0)
1495
1496
1497/**
1498 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1499 *
1500 * @returns Strict VBox status code.
1501 * @param pIemCpu The IEM state.
1502 * @param pu16 Where to return the opcode word.
1503 */
1504DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1505{
1506 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1507 if (rcStrict == VINF_SUCCESS)
1508 {
1509 uint8_t offOpcode = pIemCpu->offOpcode;
1510 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1511 pIemCpu->offOpcode = offOpcode + 2;
1512 }
1513 else
1514 *pu16 = 0;
1515 return rcStrict;
1516}
1517
1518
1519/**
1520 * Fetches the next opcode word.
1521 *
1522 * @returns Strict VBox status code.
1523 * @param pIemCpu The IEM state.
1524 * @param pu16 Where to return the opcode word.
1525 */
1526DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1527{
1528 uint8_t const offOpcode = pIemCpu->offOpcode;
1529 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1530 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1531
1532 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1533 pIemCpu->offOpcode = offOpcode + 2;
1534 return VINF_SUCCESS;
1535}
1536
1537
1538/**
1539 * Fetches the next opcode word, returns automatically on failure.
1540 *
1541 * @param a_pu16 Where to return the opcode word.
1542 * @remark Implicitly references pIemCpu.
1543 */
1544#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1545 do \
1546 { \
1547 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1548 if (rcStrict2 != VINF_SUCCESS) \
1549 return rcStrict2; \
1550 } while (0)
1551
1552
1553/**
1554 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1555 *
1556 * @returns Strict VBox status code.
1557 * @param pIemCpu The IEM state.
1558 * @param pu32 Where to return the opcode double word.
1559 */
1560DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1561{
1562 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1563 if (rcStrict == VINF_SUCCESS)
1564 {
1565 uint8_t offOpcode = pIemCpu->offOpcode;
1566 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1567 pIemCpu->offOpcode = offOpcode + 2;
1568 }
1569 else
1570 *pu32 = 0;
1571 return rcStrict;
1572}
1573
1574
1575/**
1576 * Fetches the next opcode word, zero extending it to a double word.
1577 *
1578 * @returns Strict VBox status code.
1579 * @param pIemCpu The IEM state.
1580 * @param pu32 Where to return the opcode double word.
1581 */
1582DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1583{
1584 uint8_t const offOpcode = pIemCpu->offOpcode;
1585 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1586 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1587
1588 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1589 pIemCpu->offOpcode = offOpcode + 2;
1590 return VINF_SUCCESS;
1591}
1592
1593
1594/**
1595 * Fetches the next opcode word and zero extends it to a double word, returns
1596 * automatically on failure.
1597 *
1598 * @param a_pu32 Where to return the opcode double word.
1599 * @remark Implicitly references pIemCpu.
1600 */
1601#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1602 do \
1603 { \
1604 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1605 if (rcStrict2 != VINF_SUCCESS) \
1606 return rcStrict2; \
1607 } while (0)
1608
1609
1610/**
1611 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1612 *
1613 * @returns Strict VBox status code.
1614 * @param pIemCpu The IEM state.
1615 * @param pu64 Where to return the opcode quad word.
1616 */
1617DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1618{
1619 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1620 if (rcStrict == VINF_SUCCESS)
1621 {
1622 uint8_t offOpcode = pIemCpu->offOpcode;
1623 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1624 pIemCpu->offOpcode = offOpcode + 2;
1625 }
1626 else
1627 *pu64 = 0;
1628 return rcStrict;
1629}
1630
1631
1632/**
1633 * Fetches the next opcode word, zero extending it to a quad word.
1634 *
1635 * @returns Strict VBox status code.
1636 * @param pIemCpu The IEM state.
1637 * @param pu64 Where to return the opcode quad word.
1638 */
1639DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1640{
1641 uint8_t const offOpcode = pIemCpu->offOpcode;
1642 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1643 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1644
1645 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1646 pIemCpu->offOpcode = offOpcode + 2;
1647 return VINF_SUCCESS;
1648}
1649
1650
1651/**
1652 * Fetches the next opcode word and zero extends it to a quad word, returns
1653 * automatically on failure.
1654 *
1655 * @param a_pu64 Where to return the opcode quad word.
1656 * @remark Implicitly references pIemCpu.
1657 */
1658#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1659 do \
1660 { \
1661 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1662 if (rcStrict2 != VINF_SUCCESS) \
1663 return rcStrict2; \
1664 } while (0)
1665
1666
1667/**
1668 * Fetches the next signed word from the opcode stream.
1669 *
1670 * @returns Strict VBox status code.
1671 * @param pIemCpu The IEM state.
1672 * @param pi16 Where to return the signed word.
1673 */
1674DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1675{
1676 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1677}
1678
1679
1680/**
1681 * Fetches the next signed word from the opcode stream, returning automatically
1682 * on failure.
1683 *
1684 * @param a_pi16 Where to return the signed word.
1685 * @remark Implicitly references pIemCpu.
1686 */
1687#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1688 do \
1689 { \
1690 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1691 if (rcStrict2 != VINF_SUCCESS) \
1692 return rcStrict2; \
1693 } while (0)
1694
1695
1696/**
1697 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1698 *
1699 * @returns Strict VBox status code.
1700 * @param pIemCpu The IEM state.
1701 * @param pu32 Where to return the opcode dword.
1702 */
1703DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1704{
1705 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1706 if (rcStrict == VINF_SUCCESS)
1707 {
1708 uint8_t offOpcode = pIemCpu->offOpcode;
1709 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1710 pIemCpu->abOpcode[offOpcode + 1],
1711 pIemCpu->abOpcode[offOpcode + 2],
1712 pIemCpu->abOpcode[offOpcode + 3]);
1713 pIemCpu->offOpcode = offOpcode + 4;
1714 }
1715 else
1716 *pu32 = 0;
1717 return rcStrict;
1718}
1719
1720
1721/**
1722 * Fetches the next opcode dword.
1723 *
1724 * @returns Strict VBox status code.
1725 * @param pIemCpu The IEM state.
1726 * @param pu32 Where to return the opcode double word.
1727 */
1728DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1729{
1730 uint8_t const offOpcode = pIemCpu->offOpcode;
1731 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1732 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1733
1734 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1735 pIemCpu->abOpcode[offOpcode + 1],
1736 pIemCpu->abOpcode[offOpcode + 2],
1737 pIemCpu->abOpcode[offOpcode + 3]);
1738 pIemCpu->offOpcode = offOpcode + 4;
1739 return VINF_SUCCESS;
1740}
1741
1742
1743/**
1744 * Fetches the next opcode dword, returns automatically on failure.
1745 *
1746 * @param a_pu32 Where to return the opcode dword.
1747 * @remark Implicitly references pIemCpu.
1748 */
1749#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1750 do \
1751 { \
1752 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1753 if (rcStrict2 != VINF_SUCCESS) \
1754 return rcStrict2; \
1755 } while (0)
1756
1757
1758/**
1759 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1760 *
1761 * @returns Strict VBox status code.
1762 * @param pIemCpu The IEM state.
1763 * @param pu64 Where to return the opcode dword.
1764 */
1765DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1766{
1767 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1768 if (rcStrict == VINF_SUCCESS)
1769 {
1770 uint8_t offOpcode = pIemCpu->offOpcode;
1771 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1772 pIemCpu->abOpcode[offOpcode + 1],
1773 pIemCpu->abOpcode[offOpcode + 2],
1774 pIemCpu->abOpcode[offOpcode + 3]);
1775 pIemCpu->offOpcode = offOpcode + 4;
1776 }
1777 else
1778 *pu64 = 0;
1779 return rcStrict;
1780}
1781
1782
1783/**
1784 * Fetches the next opcode dword, zero extending it to a quad word.
1785 *
1786 * @returns Strict VBox status code.
1787 * @param pIemCpu The IEM state.
1788 * @param pu64 Where to return the opcode quad word.
1789 */
1790DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1791{
1792 uint8_t const offOpcode = pIemCpu->offOpcode;
1793 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1794 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1795
1796 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1797 pIemCpu->abOpcode[offOpcode + 1],
1798 pIemCpu->abOpcode[offOpcode + 2],
1799 pIemCpu->abOpcode[offOpcode + 3]);
1800 pIemCpu->offOpcode = offOpcode + 4;
1801 return VINF_SUCCESS;
1802}
1803
1804
1805/**
1806 * Fetches the next opcode dword and zero extends it to a quad word, returns
1807 * automatically on failure.
1808 *
1809 * @param a_pu64 Where to return the opcode quad word.
1810 * @remark Implicitly references pIemCpu.
1811 */
1812#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1813 do \
1814 { \
1815 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1816 if (rcStrict2 != VINF_SUCCESS) \
1817 return rcStrict2; \
1818 } while (0)
1819
1820
1821/**
1822 * Fetches the next signed double word from the opcode stream.
1823 *
1824 * @returns Strict VBox status code.
1825 * @param pIemCpu The IEM state.
1826 * @param pi32 Where to return the signed double word.
1827 */
1828DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1829{
1830 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1831}
1832
1833/**
1834 * Fetches the next signed double word from the opcode stream, returning
1835 * automatically on failure.
1836 *
1837 * @param a_pi32 Where to return the signed double word.
1838 * @remark Implicitly references pIemCpu.
1839 */
1840#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1841 do \
1842 { \
1843 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1844 if (rcStrict2 != VINF_SUCCESS) \
1845 return rcStrict2; \
1846 } while (0)
1847
1848
1849/**
1850 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1851 *
1852 * @returns Strict VBox status code.
1853 * @param pIemCpu The IEM state.
1854 * @param pu64 Where to return the opcode qword.
1855 */
1856DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1857{
1858 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1859 if (rcStrict == VINF_SUCCESS)
1860 {
1861 uint8_t offOpcode = pIemCpu->offOpcode;
1862 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1863 pIemCpu->abOpcode[offOpcode + 1],
1864 pIemCpu->abOpcode[offOpcode + 2],
1865 pIemCpu->abOpcode[offOpcode + 3]);
1866 pIemCpu->offOpcode = offOpcode + 4;
1867 }
1868 else
1869 *pu64 = 0;
1870 return rcStrict;
1871}
1872
1873
1874/**
1875 * Fetches the next opcode dword, sign extending it into a quad word.
1876 *
1877 * @returns Strict VBox status code.
1878 * @param pIemCpu The IEM state.
1879 * @param pu64 Where to return the opcode quad word.
1880 */
1881DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1882{
1883 uint8_t const offOpcode = pIemCpu->offOpcode;
1884 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1885 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1886
1887 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1888 pIemCpu->abOpcode[offOpcode + 1],
1889 pIemCpu->abOpcode[offOpcode + 2],
1890 pIemCpu->abOpcode[offOpcode + 3]);
1891 *pu64 = i32;
1892 pIemCpu->offOpcode = offOpcode + 4;
1893 return VINF_SUCCESS;
1894}
1895
1896
1897/**
1898 * Fetches the next opcode double word and sign extends it to a quad word,
1899 * returns automatically on failure.
1900 *
1901 * @param a_pu64 Where to return the opcode quad word.
1902 * @remark Implicitly references pIemCpu.
1903 */
1904#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1905 do \
1906 { \
1907 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1908 if (rcStrict2 != VINF_SUCCESS) \
1909 return rcStrict2; \
1910 } while (0)
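
/*
 * Illustrative note: this is the fetch form for immediates that 64-bit code
 * defines as sign-extended imm32, so a fetched dword of 0x80000000 becomes
 * 0xffffffff80000000 in *pu64 while 0x7fffffff stays 0x000000007fffffff.
 */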
1911
1912
1913/**
1914 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1915 *
1916 * @returns Strict VBox status code.
1917 * @param pIemCpu The IEM state.
1918 * @param pu64 Where to return the opcode qword.
1919 */
1920DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1921{
1922 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1923 if (rcStrict == VINF_SUCCESS)
1924 {
1925 uint8_t offOpcode = pIemCpu->offOpcode;
1926 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1927 pIemCpu->abOpcode[offOpcode + 1],
1928 pIemCpu->abOpcode[offOpcode + 2],
1929 pIemCpu->abOpcode[offOpcode + 3],
1930 pIemCpu->abOpcode[offOpcode + 4],
1931 pIemCpu->abOpcode[offOpcode + 5],
1932 pIemCpu->abOpcode[offOpcode + 6],
1933 pIemCpu->abOpcode[offOpcode + 7]);
1934 pIemCpu->offOpcode = offOpcode + 8;
1935 }
1936 else
1937 *pu64 = 0;
1938 return rcStrict;
1939}
1940
1941
1942/**
1943 * Fetches the next opcode qword.
1944 *
1945 * @returns Strict VBox status code.
1946 * @param pIemCpu The IEM state.
1947 * @param pu64 Where to return the opcode qword.
1948 */
1949DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1950{
1951 uint8_t const offOpcode = pIemCpu->offOpcode;
1952 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1953 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1954
1955 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1956 pIemCpu->abOpcode[offOpcode + 1],
1957 pIemCpu->abOpcode[offOpcode + 2],
1958 pIemCpu->abOpcode[offOpcode + 3],
1959 pIemCpu->abOpcode[offOpcode + 4],
1960 pIemCpu->abOpcode[offOpcode + 5],
1961 pIemCpu->abOpcode[offOpcode + 6],
1962 pIemCpu->abOpcode[offOpcode + 7]);
1963 pIemCpu->offOpcode = offOpcode + 8;
1964 return VINF_SUCCESS;
1965}
1966
1967
1968/**
1969 * Fetches the next opcode quad word, returns automatically on failure.
1970 *
1971 * @param a_pu64 Where to return the opcode quad word.
1972 * @remark Implicitly references pIemCpu.
1973 */
1974#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1975 do \
1976 { \
1977 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1978 if (rcStrict2 != VINF_SUCCESS) \
1979 return rcStrict2; \
1980 } while (0)
1981
1982
1983/** @name Misc Worker Functions.
1984 * @{
1985 */
1986
1987
1988/**
1989 * Validates a new SS segment.
1990 *
1991 * @returns VBox strict status code.
1992 * @param pIemCpu The IEM per CPU instance data.
1993 * @param pCtx The CPU context.
1994 * @param NewSS The new SS selector.
1995 * @param uCpl The CPL to load the stack for.
1996 * @param pDesc Where to return the descriptor.
1997 */
1998IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1999{
2000 NOREF(pCtx);
2001
2002 /* Null selectors are not allowed (we're not called for dispatching
2003 interrupts with SS=0 in long mode). */
2004 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2005 {
2006 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2007 return iemRaiseTaskSwitchFault0(pIemCpu);
2008 }
2009
2010 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2011 if ((NewSS & X86_SEL_RPL) != uCpl)
2012 {
2013 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2014 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2015 }
2016
2017 /*
2018 * Read the descriptor.
2019 */
2020 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
2021 if (rcStrict != VINF_SUCCESS)
2022 return rcStrict;
2023
2024 /*
2025 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2026 */
2027 if (!pDesc->Legacy.Gen.u1DescType)
2028 {
2029 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2030 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2031 }
2032
2033 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2034 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2035 {
2036 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2037 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2038 }
2039 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2040 {
2041 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2042 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2043 }
2044
2045 /* Is it there? */
2046 /** @todo testcase: Is this checked before the canonical / limit check below? */
2047 if (!pDesc->Legacy.Gen.u1Present)
2048 {
2049 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2050 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2051 }
2052
2053 return VINF_SUCCESS;
2054}
2055
2056
2057/**
2058 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2059 * not.
2060 *
2061 * @param a_pIemCpu The IEM per CPU data.
2062 * @param a_pCtx The CPU context.
2063 */
2064#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2065# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2066 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2067 ? (a_pCtx)->eflags.u \
2068 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2069#else
2070# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2071 ( (a_pCtx)->eflags.u )
2072#endif
2073
2074/**
2075 * Updates the EFLAGS in the correct manner wrt. PATM.
2076 *
2077 * @param a_pIemCpu The IEM per CPU data.
2078 * @param a_pCtx The CPU context.
2079 * @param a_fEfl The new EFLAGS.
2080 */
2081#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2082# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2083 do { \
2084 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2085 (a_pCtx)->eflags.u = (a_fEfl); \
2086 else \
2087 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2088 } while (0)
2089#else
2090# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2091 do { \
2092 (a_pCtx)->eflags.u = (a_fEfl); \
2093 } while (0)
2094#endif
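
/*
 * Usage sketch (illustrative): flag updates are expected to go through this
 * macro pair so raw-mode (PATM) managed bits stay consistent; the surrounding
 * function is assumed to have pIemCpu and pCtx in scope:
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;                   // modify the cached value ...
 *      IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);  // ... and write it back
 */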
2095
2096
2097/** @} */
2098
2099/** @name Raising Exceptions.
2100 *
2101 * @{
2102 */
2103
2104/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2105 * @{ */
2106/** CPU exception. */
2107#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2108/** External interrupt (from PIC, APIC, whatever). */
2109#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2110/** Software interrupt (int or into, not bound).
2111 * Returns to the following instruction. */
2112#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2113/** Takes an error code. */
2114#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2115/** Takes a CR2. */
2116#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2117/** Generated by the breakpoint instruction. */
2118#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2119/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2120#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2121/** @} */
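
/*
 * Illustrative combinations (not an exhaustive list): a CPU exception that
 * delivers an error code and a fault address would typically be raised with
 * IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2, while a
 * plain INT n uses IEM_XCPT_FLAGS_T_SOFT_INT with neither error code nor CR2.
 */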
2122
2123
2124/**
2125 * Loads the specified stack far pointer from the TSS.
2126 *
2127 * @returns VBox strict status code.
2128 * @param pIemCpu The IEM per CPU instance data.
2129 * @param pCtx The CPU context.
2130 * @param uCpl The CPL to load the stack for.
2131 * @param pSelSS Where to return the new stack segment.
2132 * @param puEsp Where to return the new stack pointer.
2133 */
2134IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2135 PRTSEL pSelSS, uint32_t *puEsp)
2136{
2137 VBOXSTRICTRC rcStrict;
2138 Assert(uCpl < 4);
2139
2140 switch (pCtx->tr.Attr.n.u4Type)
2141 {
2142 /*
2143 * 16-bit TSS (X86TSS16).
2144 */
2145 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2146 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2147 {
2148 uint32_t off = uCpl * 4 + 2;
2149 if (off + 4 <= pCtx->tr.u32Limit)
2150 {
2151 /** @todo check actual access pattern here. */
2152 uint32_t u32Tmp = 0; /* gcc maybe... */
2153 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2154 if (rcStrict == VINF_SUCCESS)
2155 {
2156 *puEsp = RT_LOWORD(u32Tmp);
2157 *pSelSS = RT_HIWORD(u32Tmp);
2158 return VINF_SUCCESS;
2159 }
2160 }
2161 else
2162 {
2163 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2164 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2165 }
2166 break;
2167 }
2168
2169 /*
2170 * 32-bit TSS (X86TSS32).
2171 */
2172 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2173 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2174 {
2175 uint32_t off = uCpl * 8 + 4;
2176 if (off + 7 <= pCtx->tr.u32Limit)
2177 {
2178/** @todo check actual access pattern here. */
2179 uint64_t u64Tmp;
2180 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2181 if (rcStrict == VINF_SUCCESS)
2182 {
2183 *puEsp = u64Tmp & UINT32_MAX;
2184 *pSelSS = (RTSEL)(u64Tmp >> 32);
2185 return VINF_SUCCESS;
2186 }
2187 }
2188 else
2189 {
2190 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2191 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2192 }
2193 break;
2194 }
2195
2196 default:
2197 AssertFailed();
2198 rcStrict = VERR_IEM_IPE_4;
2199 break;
2200 }
2201
2202 *puEsp = 0; /* make gcc happy */
2203 *pSelSS = 0; /* make gcc happy */
2204 return rcStrict;
2205}
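
/*
 * Worked example (derived from the code above): for uCpl=1 the 16-bit TSS path
 * reads 4 bytes at offset 1*4+2 = 6, i.e. the ring-1 sp:ss pair, while the
 * 32-bit path reads 8 bytes at offset 1*8+4 = 12, i.e. esp1 in the low dword
 * followed by the ss1 selector in the upper half of the qword.
 */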
2206
2207
2208/**
2209 * Loads the specified stack pointer from the 64-bit TSS.
2210 *
2211 * @returns VBox strict status code.
2212 * @param pIemCpu The IEM per CPU instance data.
2213 * @param pCtx The CPU context.
2214 * @param uCpl The CPL to load the stack for.
2215 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2216 * @param puRsp Where to return the new stack pointer.
2217 */
2218IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2219{
2220 Assert(uCpl < 4);
2221 Assert(uIst < 8);
2222 *puRsp = 0; /* make gcc happy */
2223
2224 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2225
2226 uint32_t off;
2227 if (uIst)
2228 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2229 else
2230 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2231 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2232 {
2233 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2234 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2235 }
2236
2237 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2238}
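
/*
 * Worked example (derived from the code above): uIst=0, uCpl=2 reads the 8-byte
 * CPL-2 stack pointer at RT_OFFSETOF(X86TSS64, rsp0) + 2*8, while uIst=3 reads
 * the third IST slot at RT_OFFSETOF(X86TSS64, ist1) + 2*8; either way the whole
 * quadword must fit below the TR limit or a #TS is raised.
 */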
2239
2240
2241/**
2242 * Adjusts the CPU state according to the exception being raised.
2243 *
2244 * @param pCtx The CPU context.
2245 * @param u8Vector The exception that has been raised.
2246 */
2247DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2248{
2249 switch (u8Vector)
2250 {
2251 case X86_XCPT_DB:
2252 pCtx->dr[7] &= ~X86_DR7_GD;
2253 break;
2254 /** @todo Read the AMD and Intel exception reference... */
2255 }
2256}
2257
2258
2259/**
2260 * Implements exceptions and interrupts for real mode.
2261 *
2262 * @returns VBox strict status code.
2263 * @param pIemCpu The IEM per CPU instance data.
2264 * @param pCtx The CPU context.
2265 * @param cbInstr The number of bytes to offset rIP by in the return
2266 * address.
2267 * @param u8Vector The interrupt / exception vector number.
2268 * @param fFlags The flags.
2269 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2270 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2271 */
2272IEM_STATIC VBOXSTRICTRC
2273iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2274 PCPUMCTX pCtx,
2275 uint8_t cbInstr,
2276 uint8_t u8Vector,
2277 uint32_t fFlags,
2278 uint16_t uErr,
2279 uint64_t uCr2)
2280{
2281 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2282 NOREF(uErr); NOREF(uCr2);
2283
2284 /*
2285 * Read the IDT entry.
2286 */
2287 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2288 {
2289 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2290 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2291 }
2292 RTFAR16 Idte;
2293 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2294 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2295 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2296 return rcStrict;
2297
2298 /*
2299 * Push the stack frame.
2300 */
2301 uint16_t *pu16Frame;
2302 uint64_t uNewRsp;
2303 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2304 if (rcStrict != VINF_SUCCESS)
2305 return rcStrict;
2306
2307 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2308#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2309 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2310 if (pIemCpu->uTargetCpu <= IEMTARGETCPU_186)
2311 fEfl |= UINT16_C(0xf000);
2312#endif
2313 pu16Frame[2] = (uint16_t)fEfl;
2314 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2315 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2316 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2317 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2318 return rcStrict;
2319
2320 /*
2321 * Load the vector address into cs:ip and make exception specific state
2322 * adjustments.
2323 */
2324 pCtx->cs.Sel = Idte.sel;
2325 pCtx->cs.ValidSel = Idte.sel;
2326 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2327 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2328 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2329 pCtx->rip = Idte.off;
2330 fEfl &= ~X86_EFL_IF;
2331 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2332
2333 /** @todo do we actually do this in real mode? */
2334 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2335 iemRaiseXcptAdjustState(pCtx, u8Vector);
2336
2337 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2338}
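
/*
 * Worked example (derived from the code above): for vector 0x21 the 4-byte IVT
 * entry is read from idtr.pIdt + 0x84 (16-bit offset, then 16-bit segment), and
 * the 6-byte frame is pushed with FLAGS at the highest address, CS below it and
 * the return IP at the new SP.
 */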
2339
2340
2341/**
2342 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2343 *
2344 * @param pIemCpu The IEM per CPU instance data.
2345 * @param pSReg Pointer to the segment register.
2346 */
2347IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2348{
2349 pSReg->Sel = 0;
2350 pSReg->ValidSel = 0;
2351 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2352 {
2353 /* VT-x (Intel 3960x) doesn't change the base and limit; it only clears and sets the following attributes. */
2354 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2355 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2356 }
2357 else
2358 {
2359 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2360 /** @todo check this on AMD-V */
2361 pSReg->u64Base = 0;
2362 pSReg->u32Limit = 0;
2363 }
2364}
2365
2366
2367/**
2368 * Loads a segment selector during a task switch in V8086 mode.
2369 *
2370 * @param pIemCpu The IEM per CPU instance data.
2371 * @param pSReg Pointer to the segment register.
2372 * @param uSel The selector value to load.
2373 */
2374IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2375{
2376 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2377 pSReg->Sel = uSel;
2378 pSReg->ValidSel = uSel;
2379 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2380 pSReg->u64Base = uSel << 4;
2381 pSReg->u32Limit = 0xffff;
2382 pSReg->Attr.u = 0xf3;
2383}
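
/*
 * Worked example (derived from the code above): loading uSel=0x1234 in V8086
 * mode yields Sel=0x1234, u64Base=0x12340 (selector << 4), u32Limit=0xffff and
 * the fixed attribute byte 0xf3, i.e. a present, DPL 3, accessed read/write
 * data segment.
 */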
2384
2385
2386/**
2387 * Loads a NULL data selector into a selector register, both the hidden and
2388 * visible parts, in protected mode.
2389 *
2390 * @param pIemCpu The IEM state of the calling EMT.
2391 * @param pSReg Pointer to the segment register.
2392 * @param uRpl The RPL.
2393 */
2394IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2395{
2396 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2397 * data selector in protected mode. */
2398 pSReg->Sel = uRpl;
2399 pSReg->ValidSel = uRpl;
2400 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2401 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2402 {
2403 /* VT-x (Intel 3960x) observed doing something like this. */
2404 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2405 pSReg->u32Limit = UINT32_MAX;
2406 pSReg->u64Base = 0;
2407 }
2408 else
2409 {
2410 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2411 pSReg->u32Limit = 0;
2412 pSReg->u64Base = 0;
2413 }
2414}
2415
2416
2417/**
2418 * Loads a segment selector during a task switch in protected mode.
2419 *
2420 * In this task switch scenario, we would throw \#TS exceptions rather than
2421 * \#GPs.
2422 *
2423 * @returns VBox strict status code.
2424 * @param pIemCpu The IEM per CPU instance data.
2425 * @param pSReg Pointer to the segment register.
2426 * @param uSel The new selector value.
2427 *
2428 * @remarks This does _not_ handle CS or SS.
2429 * @remarks This expects pIemCpu->uCpl to be up to date.
2430 */
2431IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2432{
2433 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2434
2435 /* Null data selector. */
2436 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2437 {
2438 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2439 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2440 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2441 return VINF_SUCCESS;
2442 }
2443
2444 /* Fetch the descriptor. */
2445 IEMSELDESC Desc;
2446 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2447 if (rcStrict != VINF_SUCCESS)
2448 {
2449 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2450 VBOXSTRICTRC_VAL(rcStrict)));
2451 return rcStrict;
2452 }
2453
2454 /* Must be a data segment or readable code segment. */
2455 if ( !Desc.Legacy.Gen.u1DescType
2456 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2457 {
2458 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2459 Desc.Legacy.Gen.u4Type));
2460 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2461 }
2462
2463 /* Check privileges for data segments and non-conforming code segments. */
2464 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2465 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2466 {
2467 /* The RPL and the new CPL must be less than or equal to the DPL. */
2468 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2469 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2470 {
2471 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2472 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2473 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2474 }
2475 }
2476
2477 /* Is it there? */
2478 if (!Desc.Legacy.Gen.u1Present)
2479 {
2480 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2481 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2482 }
2483
2484 /* The base and limit. */
2485 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2486 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2487
2488 /*
2489 * Ok, everything checked out fine. Now set the accessed bit before
2490 * committing the result into the registers.
2491 */
2492 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2493 {
2494 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2495 if (rcStrict != VINF_SUCCESS)
2496 return rcStrict;
2497 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2498 }
2499
2500 /* Commit */
2501 pSReg->Sel = uSel;
2502 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2503 pSReg->u32Limit = cbLimit;
2504 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2505 pSReg->ValidSel = uSel;
2506 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2507 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2508 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2509
2510 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2511 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2512 return VINF_SUCCESS;
2513}
2514
2515
2516/**
2517 * Performs a task switch.
2518 *
2519 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2520 * caller is responsible for performing the necessary checks (like DPL, TSS
2521 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2522 * reference for JMP, CALL, IRET.
2523 *
2524 * If the task switch is due to a software interrupt or hardware exception,
2525 * the caller is responsible for validating the TSS selector and descriptor. See
2526 * Intel Instruction reference for INT n.
2527 *
2528 * @returns VBox strict status code.
2529 * @param pIemCpu The IEM per CPU instance data.
2530 * @param pCtx The CPU context.
2531 * @param enmTaskSwitch What caused this task switch.
2532 * @param uNextEip The EIP effective after the task switch.
2533 * @param fFlags The flags.
2534 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2535 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2536 * @param SelTSS The TSS selector of the new task.
2537 * @param pNewDescTSS Pointer to the new TSS descriptor.
2538 */
2539IEM_STATIC VBOXSTRICTRC
2540iemTaskSwitch(PIEMCPU pIemCpu,
2541 PCPUMCTX pCtx,
2542 IEMTASKSWITCH enmTaskSwitch,
2543 uint32_t uNextEip,
2544 uint32_t fFlags,
2545 uint16_t uErr,
2546 uint64_t uCr2,
2547 RTSEL SelTSS,
2548 PIEMSELDESC pNewDescTSS)
2549{
2550 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2551 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2552
2553 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2554 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2555 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2556 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2557 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2558
2559 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2560 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2561
2562 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2563 fIsNewTSS386, pCtx->eip, uNextEip));
2564
2565 /* Update CR2 in case it's a page-fault. */
2566 /** @todo This should probably be done much earlier in IEM/PGM. See
2567 * @bugref{5653#c49}. */
2568 if (fFlags & IEM_XCPT_FLAGS_CR2)
2569 pCtx->cr2 = uCr2;
2570
2571 /*
2572 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2573 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2574 */
2575 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2576 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2577 if (uNewTSSLimit < uNewTSSLimitMin)
2578 {
2579 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2580 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2581 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2582 }
2583
2584 /*
2585 * Check the current TSS limit. The last written byte to the current TSS during the
2586 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2587 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2588 *
2589 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2590 * end up with smaller than "legal" TSS limits.
2591 */
2592 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2593 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2594 if (uCurTSSLimit < uCurTSSLimitMin)
2595 {
2596 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2597 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2598 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2599 }
2600
2601 /*
2602 * Verify that the new TSS can be accessed and map it. Map only the required contents
2603 * and not the entire TSS.
2604 */
2605 void *pvNewTSS;
2606 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2607 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2608 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2609 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2610 * not perform correct translation if this happens. See Intel spec. 7.2.1
2611 * "Task-State Segment" */
2612 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2613 if (rcStrict != VINF_SUCCESS)
2614 {
2615 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2616 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2617 return rcStrict;
2618 }
2619
2620 /*
2621 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2622 */
2623 uint32_t u32EFlags = pCtx->eflags.u32;
2624 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2625 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2626 {
2627 PX86DESC pDescCurTSS;
2628 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2629 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2630 if (rcStrict != VINF_SUCCESS)
2631 {
2632 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2633 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2634 return rcStrict;
2635 }
2636
2637 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2638 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2639 if (rcStrict != VINF_SUCCESS)
2640 {
2641 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2642 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2643 return rcStrict;
2644 }
2645
2646 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2647 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2648 {
2649 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2650 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2651 u32EFlags &= ~X86_EFL_NT;
2652 }
2653 }
2654
2655 /*
2656 * Save the CPU state into the current TSS.
2657 */
2658 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2659 if (GCPtrNewTSS == GCPtrCurTSS)
2660 {
2661 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2662 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2663 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2664 }
2665 if (fIsNewTSS386)
2666 {
2667 /*
2668 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2669 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2670 */
2671 void *pvCurTSS32;
2672 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2673 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2674 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2675 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2676 if (rcStrict != VINF_SUCCESS)
2677 {
2678 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2679 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2680 return rcStrict;
2681 }
2682
2683 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2684 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2685 pCurTSS32->eip = uNextEip;
2686 pCurTSS32->eflags = u32EFlags;
2687 pCurTSS32->eax = pCtx->eax;
2688 pCurTSS32->ecx = pCtx->ecx;
2689 pCurTSS32->edx = pCtx->edx;
2690 pCurTSS32->ebx = pCtx->ebx;
2691 pCurTSS32->esp = pCtx->esp;
2692 pCurTSS32->ebp = pCtx->ebp;
2693 pCurTSS32->esi = pCtx->esi;
2694 pCurTSS32->edi = pCtx->edi;
2695 pCurTSS32->es = pCtx->es.Sel;
2696 pCurTSS32->cs = pCtx->cs.Sel;
2697 pCurTSS32->ss = pCtx->ss.Sel;
2698 pCurTSS32->ds = pCtx->ds.Sel;
2699 pCurTSS32->fs = pCtx->fs.Sel;
2700 pCurTSS32->gs = pCtx->gs.Sel;
2701
2702 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2703 if (rcStrict != VINF_SUCCESS)
2704 {
2705 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2706 VBOXSTRICTRC_VAL(rcStrict)));
2707 return rcStrict;
2708 }
2709 }
2710 else
2711 {
2712 /*
2713 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2714 */
2715 void *pvCurTSS16;
2716 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2717 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2718 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2719 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2720 if (rcStrict != VINF_SUCCESS)
2721 {
2722 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2723 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2724 return rcStrict;
2725 }
2726
2727 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2728 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2729 pCurTSS16->ip = uNextEip;
2730 pCurTSS16->flags = u32EFlags;
2731 pCurTSS16->ax = pCtx->ax;
2732 pCurTSS16->cx = pCtx->cx;
2733 pCurTSS16->dx = pCtx->dx;
2734 pCurTSS16->bx = pCtx->bx;
2735 pCurTSS16->sp = pCtx->sp;
2736 pCurTSS16->bp = pCtx->bp;
2737 pCurTSS16->si = pCtx->si;
2738 pCurTSS16->di = pCtx->di;
2739 pCurTSS16->es = pCtx->es.Sel;
2740 pCurTSS16->cs = pCtx->cs.Sel;
2741 pCurTSS16->ss = pCtx->ss.Sel;
2742 pCurTSS16->ds = pCtx->ds.Sel;
2743
2744 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2745 if (rcStrict != VINF_SUCCESS)
2746 {
2747 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2748 VBOXSTRICTRC_VAL(rcStrict)));
2749 return rcStrict;
2750 }
2751 }
2752
2753 /*
2754 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2755 */
2756 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2757 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2758 {
2759 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2760 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2761 pNewTSS->selPrev = pCtx->tr.Sel;
2762 }
2763
2764 /*
2765 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2766 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2767 */
2768 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2769 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2770 bool fNewDebugTrap;
2771 if (fIsNewTSS386)
2772 {
2773 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2774 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2775 uNewEip = pNewTSS32->eip;
2776 uNewEflags = pNewTSS32->eflags;
2777 uNewEax = pNewTSS32->eax;
2778 uNewEcx = pNewTSS32->ecx;
2779 uNewEdx = pNewTSS32->edx;
2780 uNewEbx = pNewTSS32->ebx;
2781 uNewEsp = pNewTSS32->esp;
2782 uNewEbp = pNewTSS32->ebp;
2783 uNewEsi = pNewTSS32->esi;
2784 uNewEdi = pNewTSS32->edi;
2785 uNewES = pNewTSS32->es;
2786 uNewCS = pNewTSS32->cs;
2787 uNewSS = pNewTSS32->ss;
2788 uNewDS = pNewTSS32->ds;
2789 uNewFS = pNewTSS32->fs;
2790 uNewGS = pNewTSS32->gs;
2791 uNewLdt = pNewTSS32->selLdt;
2792 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2793 }
2794 else
2795 {
2796 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2797 uNewCr3 = 0;
2798 uNewEip = pNewTSS16->ip;
2799 uNewEflags = pNewTSS16->flags;
2800 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2801 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2802 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2803 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2804 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2805 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2806 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2807 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2808 uNewES = pNewTSS16->es;
2809 uNewCS = pNewTSS16->cs;
2810 uNewSS = pNewTSS16->ss;
2811 uNewDS = pNewTSS16->ds;
2812 uNewFS = 0;
2813 uNewGS = 0;
2814 uNewLdt = pNewTSS16->selLdt;
2815 fNewDebugTrap = false;
2816 }
2817
2818 if (GCPtrNewTSS == GCPtrCurTSS)
2819 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2820 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2821
2822 /*
2823 * We're done accessing the new TSS.
2824 */
2825 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2826 if (rcStrict != VINF_SUCCESS)
2827 {
2828 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2829 return rcStrict;
2830 }
2831
2832 /*
2833 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2834 */
2835 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2836 {
2837 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2838 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2839 if (rcStrict != VINF_SUCCESS)
2840 {
2841 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2842 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2843 return rcStrict;
2844 }
2845
2846 /* Check that the descriptor indicates the new TSS is available (not busy). */
2847 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2848 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2849 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2850
2851 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2852 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2853 if (rcStrict != VINF_SUCCESS)
2854 {
2855 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2856 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2857 return rcStrict;
2858 }
2859 }
2860
2861 /*
2862 * From this point on, we're technically in the new task. Exceptions raised from here on are
2863 * deferred until the task switch completes and are then delivered before any instruction of the new task executes.
2864 */
2865 pCtx->tr.Sel = SelTSS;
2866 pCtx->tr.ValidSel = SelTSS;
2867 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2868 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2869 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2870 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2871 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2872
2873 /* Set the busy bit in TR. */
2874 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2875 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2876 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2877 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2878 {
2879 uNewEflags |= X86_EFL_NT;
2880 }
2881
2882 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2883 pCtx->cr0 |= X86_CR0_TS;
2884 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2885
2886 pCtx->eip = uNewEip;
2887 pCtx->eax = uNewEax;
2888 pCtx->ecx = uNewEcx;
2889 pCtx->edx = uNewEdx;
2890 pCtx->ebx = uNewEbx;
2891 pCtx->esp = uNewEsp;
2892 pCtx->ebp = uNewEbp;
2893 pCtx->esi = uNewEsi;
2894 pCtx->edi = uNewEdi;
2895
2896 uNewEflags &= X86_EFL_LIVE_MASK;
2897 uNewEflags |= X86_EFL_RA1_MASK;
2898 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2899
2900 /*
2901 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2902 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2903 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2904 */
2905 pCtx->es.Sel = uNewES;
2906 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2907 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2908
2909 pCtx->cs.Sel = uNewCS;
2910 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2911 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2912
2913 pCtx->ss.Sel = uNewSS;
2914 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2915 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2916
2917 pCtx->ds.Sel = uNewDS;
2918 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2919 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2920
2921 pCtx->fs.Sel = uNewFS;
2922 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2923 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2924
2925 pCtx->gs.Sel = uNewGS;
2926 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2927 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2928 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2929
2930 pCtx->ldtr.Sel = uNewLdt;
2931 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2932 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2933 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2934
2935 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2936 {
2937 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2938 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2939 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2940 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2941 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2942 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2943 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2944 }
2945
2946 /*
2947 * Switch CR3 for the new task.
2948 */
2949 if ( fIsNewTSS386
2950 && (pCtx->cr0 & X86_CR0_PG))
2951 {
2952 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2953 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2954 {
2955 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2956 AssertRCSuccessReturn(rc, rc);
2957 }
2958 else
2959 pCtx->cr3 = uNewCr3;
2960
2961 /* Inform PGM. */
2962 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2963 {
2964 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2965 AssertRCReturn(rc, rc);
2966 /* ignore informational status codes */
2967 }
2968 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2969 }
2970
2971 /*
2972 * Switch LDTR for the new task.
2973 */
2974 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2975 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2976 else
2977 {
2978 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2979
2980 IEMSELDESC DescNewLdt;
2981 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2982 if (rcStrict != VINF_SUCCESS)
2983 {
2984 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2985 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2986 return rcStrict;
2987 }
2988 if ( !DescNewLdt.Legacy.Gen.u1Present
2989 || DescNewLdt.Legacy.Gen.u1DescType
2990 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2991 {
2992 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2993 uNewLdt, DescNewLdt.Legacy.u));
2994 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2995 }
2996
2997 pCtx->ldtr.ValidSel = uNewLdt;
2998 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2999 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3000 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3001 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3002 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3003 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3004 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
3005 }
3006
3007 IEMSELDESC DescSS;
3008 if (IEM_IS_V86_MODE(pIemCpu))
3009 {
3010 pIemCpu->uCpl = 3;
3011 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
3012 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
3013 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
3014 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
3015 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
3016 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
3017 }
3018 else
3019 {
3020 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3021
3022 /*
3023 * Load the stack segment for the new task.
3024 */
3025 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3026 {
3027 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3028 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3029 }
3030
3031 /* Fetch the descriptor. */
3032 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
3033 if (rcStrict != VINF_SUCCESS)
3034 {
3035 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3036 VBOXSTRICTRC_VAL(rcStrict)));
3037 return rcStrict;
3038 }
3039
3040 /* SS must be a data segment and writable. */
3041 if ( !DescSS.Legacy.Gen.u1DescType
3042 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3043 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3044 {
3045 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3046 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3047 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3048 }
3049
3050 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3051 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3052 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3053 {
3054 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3055 uNewCpl));
3056 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3057 }
3058
3059 /* Is it there? */
3060 if (!DescSS.Legacy.Gen.u1Present)
3061 {
3062 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3063 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3064 }
3065
3066 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3067 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3068
3069 /* Set the accessed bit before committing the result into SS. */
3070 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3071 {
3072 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3073 if (rcStrict != VINF_SUCCESS)
3074 return rcStrict;
3075 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3076 }
3077
3078 /* Commit SS. */
3079 pCtx->ss.Sel = uNewSS;
3080 pCtx->ss.ValidSel = uNewSS;
3081 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3082 pCtx->ss.u32Limit = cbLimit;
3083 pCtx->ss.u64Base = u64Base;
3084 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3085 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3086
3087 /* CPL has changed, update IEM before loading rest of segments. */
3088 pIemCpu->uCpl = uNewCpl;
3089
3090 /*
3091 * Load the data segments for the new task.
3092 */
3093 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3094 if (rcStrict != VINF_SUCCESS)
3095 return rcStrict;
3096 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3097 if (rcStrict != VINF_SUCCESS)
3098 return rcStrict;
3099 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3100 if (rcStrict != VINF_SUCCESS)
3101 return rcStrict;
3102 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3103 if (rcStrict != VINF_SUCCESS)
3104 return rcStrict;
3105
3106 /*
3107 * Load the code segment for the new task.
3108 */
3109 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3110 {
3111 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3112 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3113 }
3114
3115 /* Fetch the descriptor. */
3116 IEMSELDESC DescCS;
3117 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3118 if (rcStrict != VINF_SUCCESS)
3119 {
3120 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3121 return rcStrict;
3122 }
3123
3124 /* CS must be a code segment. */
3125 if ( !DescCS.Legacy.Gen.u1DescType
3126 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3127 {
3128 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3129 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3130 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3131 }
3132
3133 /* For conforming CS, DPL must be less than or equal to the RPL. */
3134 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3135 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3136 {
3137            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3138 DescCS.Legacy.Gen.u2Dpl));
3139 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3140 }
3141
3142 /* For non-conforming CS, DPL must match RPL. */
3143 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3144 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3145 {
3146            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3147 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3148 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3149 }
3150
3151 /* Is it there? */
3152 if (!DescCS.Legacy.Gen.u1Present)
3153 {
3154 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3155 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3156 }
3157
3158 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3159 u64Base = X86DESC_BASE(&DescCS.Legacy);
3160
3161 /* Set the accessed bit before committing the result into CS. */
3162 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3163 {
3164 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3165 if (rcStrict != VINF_SUCCESS)
3166 return rcStrict;
3167 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3168 }
3169
3170 /* Commit CS. */
3171 pCtx->cs.Sel = uNewCS;
3172 pCtx->cs.ValidSel = uNewCS;
3173 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3174 pCtx->cs.u32Limit = cbLimit;
3175 pCtx->cs.u64Base = u64Base;
3176 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3177 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3178 }
3179
3180 /** @todo Debug trap. */
3181 if (fIsNewTSS386 && fNewDebugTrap)
3182 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3183
3184 /*
3185 * Construct the error code masks based on what caused this task switch.
3186 * See Intel Instruction reference for INT.
3187 */
3188 uint16_t uExt;
3189 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3190 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3191 {
3192 uExt = 1;
3193 }
3194 else
3195 uExt = 0;
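    /* Note: the EXT bit (bit 0 of the error code) tells the handler that the
       event was external to the program (hardware interrupt or CPU exception)
       rather than a software INT n, which is why it is only set for the
       non-software-interrupt case above. */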
3196
3197 /*
3198 * Push any error code on to the new stack.
3199 */
3200 if (fFlags & IEM_XCPT_FLAGS_ERR)
3201 {
3202 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3203 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3204 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3205
3206 /* Check that there is sufficient space on the stack. */
3207 /** @todo Factor out segment limit checking for normal/expand down segments
3208 * into a separate function. */
3209 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3210 {
3211 if ( pCtx->esp - 1 > cbLimitSS
3212 || pCtx->esp < cbStackFrame)
3213 {
3214 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3215 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3216 cbStackFrame));
3217 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3218 }
3219 }
3220 else
3221 {
3222            if (   pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3223 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3224 {
3225 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3226 cbStackFrame));
3227 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3228 }
3229 }
3230
3231
3232 if (fIsNewTSS386)
3233 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3234 else
3235 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3236 if (rcStrict != VINF_SUCCESS)
3237 {
3238 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3239 VBOXSTRICTRC_VAL(rcStrict)));
3240 return rcStrict;
3241 }
3242 }
3243
3244 /* Check the new EIP against the new CS limit. */
3245 if (pCtx->eip > pCtx->cs.u32Limit)
3246 {
3247        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3248 pCtx->eip, pCtx->cs.u32Limit));
3249 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3250 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3251 }
3252
3253 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3254 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3255}
3256
3257
3258/**
3259 * Implements exceptions and interrupts for protected mode.
3260 *
3261 * @returns VBox strict status code.
3262 * @param pIemCpu The IEM per CPU instance data.
3263 * @param pCtx The CPU context.
3264 * @param cbInstr The number of bytes to offset rIP by in the return
3265 * address.
3266 * @param u8Vector The interrupt / exception vector number.
3267 * @param fFlags The flags.
3268 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3269 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3270 */
3271IEM_STATIC VBOXSTRICTRC
3272iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3273 PCPUMCTX pCtx,
3274 uint8_t cbInstr,
3275 uint8_t u8Vector,
3276 uint32_t fFlags,
3277 uint16_t uErr,
3278 uint64_t uCr2)
3279{
3280 /*
3281 * Read the IDT entry.
3282 */
3283 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3284 {
3285 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3286 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3287 }
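    /* Each protected mode IDT entry is an 8-byte gate descriptor, hence the
       8 * vector indexing and the +7 in the bounds check above.  (The long
       mode worker further down deals with 16-byte entries.) */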
3288 X86DESC Idte;
3289 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3290 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3291 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3292 return rcStrict;
3293 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3294 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3295 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3296
3297 /*
3298 * Check the descriptor type, DPL and such.
3299 * ASSUMES this is done in the same order as described for call-gate calls.
3300 */
3301 if (Idte.Gate.u1DescType)
3302 {
3303 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3304 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3305 }
3306 bool fTaskGate = false;
3307 uint8_t f32BitGate = true;
3308 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3309 switch (Idte.Gate.u4Type)
3310 {
3311 case X86_SEL_TYPE_SYS_UNDEFINED:
3312 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3313 case X86_SEL_TYPE_SYS_LDT:
3314 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3315 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3316 case X86_SEL_TYPE_SYS_UNDEFINED2:
3317 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3318 case X86_SEL_TYPE_SYS_UNDEFINED3:
3319 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3320 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3321 case X86_SEL_TYPE_SYS_UNDEFINED4:
3322 {
3323 /** @todo check what actually happens when the type is wrong...
3324 * esp. call gates. */
3325 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3326 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3327 }
3328
3329 case X86_SEL_TYPE_SYS_286_INT_GATE:
3330 f32BitGate = false;
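            /* fall thru */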
3331 case X86_SEL_TYPE_SYS_386_INT_GATE:
3332 fEflToClear |= X86_EFL_IF;
3333 break;
3334
3335 case X86_SEL_TYPE_SYS_TASK_GATE:
3336 fTaskGate = true;
3337#ifndef IEM_IMPLEMENTS_TASKSWITCH
3338 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3339#endif
3340 break;
3341
3342 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3343 f32BitGate = false;
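            /* fall thru */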
3344 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3345 break;
3346
3347 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3348 }
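
    /* To summarize the switch above: interrupt gates additionally clear
       EFLAGS.IF on entry, trap gates leave IF alone, task gates are dispatched
       via iemTaskSwitch below, and all other descriptor types take the #GP
       path with an IDT-style error code. */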
3349
3350 /* Check DPL against CPL if applicable. */
3351 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3352 {
3353 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3354 {
3355 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3356 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3357 }
3358 }
3359
3360 /* Is it there? */
3361 if (!Idte.Gate.u1Present)
3362 {
3363 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3364 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3365 }
3366
3367 /* Is it a task-gate? */
3368 if (fTaskGate)
3369 {
3370 /*
3371 * Construct the error code masks based on what caused this task switch.
3372 * See Intel Instruction reference for INT.
3373 */
3374 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3375 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3376 RTSEL SelTSS = Idte.Gate.u16Sel;
3377
3378 /*
3379 * Fetch the TSS descriptor in the GDT.
3380 */
3381 IEMSELDESC DescTSS;
3382 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3383 if (rcStrict != VINF_SUCCESS)
3384 {
3385 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3386 VBOXSTRICTRC_VAL(rcStrict)));
3387 return rcStrict;
3388 }
3389
3390 /* The TSS descriptor must be a system segment and be available (not busy). */
3391 if ( DescTSS.Legacy.Gen.u1DescType
3392 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3393 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3394 {
3395 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3396 u8Vector, SelTSS, DescTSS.Legacy.au64));
3397 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3398 }
3399
3400 /* The TSS must be present. */
3401 if (!DescTSS.Legacy.Gen.u1Present)
3402 {
3403 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3404 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3405 }
3406
3407 /* Do the actual task switch. */
3408 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3409 }
3410
3411 /* A null CS is bad. */
3412 RTSEL NewCS = Idte.Gate.u16Sel;
3413 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3414 {
3415 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3416 return iemRaiseGeneralProtectionFault0(pIemCpu);
3417 }
3418
3419 /* Fetch the descriptor for the new CS. */
3420 IEMSELDESC DescCS;
3421 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3422 if (rcStrict != VINF_SUCCESS)
3423 {
3424 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3425 return rcStrict;
3426 }
3427
3428 /* Must be a code segment. */
3429 if (!DescCS.Legacy.Gen.u1DescType)
3430 {
3431 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3432 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3433 }
3434 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3435 {
3436 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3437 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3438 }
3439
3440 /* Don't allow lowering the privilege level. */
3441 /** @todo Does the lowering of privileges apply to software interrupts
3442     *        only? This has a bearing on the more-privileged or
3443 * same-privilege stack behavior further down. A testcase would
3444 * be nice. */
3445 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3446 {
3447 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3448 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3449 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3450 }
3451
3452 /* Make sure the selector is present. */
3453 if (!DescCS.Legacy.Gen.u1Present)
3454 {
3455 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3456 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3457 }
3458
3459 /* Check the new EIP against the new CS limit. */
3460 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3461 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3462 ? Idte.Gate.u16OffsetLow
3463 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
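    /* 286 gates only carry a 16-bit offset; 386 gates combine the low and high
       offset words into a full 32-bit entry point. */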
3464 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3465 if (uNewEip > cbLimitCS)
3466 {
3467 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3468 u8Vector, uNewEip, cbLimitCS, NewCS));
3469 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3470 }
3471
3472 /* Calc the flag image to push. */
3473 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3474 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3475 fEfl &= ~X86_EFL_RF;
3476 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3477 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3478
3479 /* From V8086 mode only go to CPL 0. */
3480 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3481 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3482 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3483 {
3484 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3485 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3486 }
3487
3488 /*
3489 * If the privilege level changes, we need to get a new stack from the TSS.
3490     * This in turn means validating the new SS and ESP...
3491 */
3492 if (uNewCpl != pIemCpu->uCpl)
3493 {
3494 RTSEL NewSS;
3495 uint32_t uNewEsp;
3496 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3497 if (rcStrict != VINF_SUCCESS)
3498 return rcStrict;
3499
3500 IEMSELDESC DescSS;
3501 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3502 if (rcStrict != VINF_SUCCESS)
3503 return rcStrict;
3504
3505 /* Check that there is sufficient space for the stack frame. */
3506 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3507 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3508 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3509 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
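        /* I.e. a 32-bit gate pushes 5 dwords (EIP, CS, EFLAGS, ESP, SS) = 20
           bytes, 24 with an error code; interrupting V8086 code adds ES, DS,
           FS and GS for 36/40 bytes.  The 16-bit gate figures are half that:
           10/12 bytes, or 18/20 bytes from V8086 mode. */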
3510
3511 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3512 {
3513 if ( uNewEsp - 1 > cbLimitSS
3514 || uNewEsp < cbStackFrame)
3515 {
3516 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3517 u8Vector, NewSS, uNewEsp, cbStackFrame));
3518 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3519 }
3520 }
3521 else
3522 {
3523            if (   uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3524 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3525 {
3526 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3527 u8Vector, NewSS, uNewEsp, cbStackFrame));
3528 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3529 }
3530 }
3531
3532 /*
3533 * Start making changes.
3534 */
3535
3536 /* Create the stack frame. */
3537 RTPTRUNION uStackFrame;
3538 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3539 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3540 if (rcStrict != VINF_SUCCESS)
3541 return rcStrict;
3542 void * const pvStackFrame = uStackFrame.pv;
3543 if (f32BitGate)
3544 {
3545 if (fFlags & IEM_XCPT_FLAGS_ERR)
3546 *uStackFrame.pu32++ = uErr;
3547 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3548 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3549 uStackFrame.pu32[2] = fEfl;
3550 uStackFrame.pu32[3] = pCtx->esp;
3551 uStackFrame.pu32[4] = pCtx->ss.Sel;
3552 if (fEfl & X86_EFL_VM)
3553 {
3554 uStackFrame.pu32[1] = pCtx->cs.Sel;
3555 uStackFrame.pu32[5] = pCtx->es.Sel;
3556 uStackFrame.pu32[6] = pCtx->ds.Sel;
3557 uStackFrame.pu32[7] = pCtx->fs.Sel;
3558 uStackFrame.pu32[8] = pCtx->gs.Sel;
3559 }
3560 }
3561 else
3562 {
3563 if (fFlags & IEM_XCPT_FLAGS_ERR)
3564 *uStackFrame.pu16++ = uErr;
3565 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3566 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3567 uStackFrame.pu16[2] = fEfl;
3568 uStackFrame.pu16[3] = pCtx->sp;
3569 uStackFrame.pu16[4] = pCtx->ss.Sel;
3570 if (fEfl & X86_EFL_VM)
3571 {
3572 uStackFrame.pu16[1] = pCtx->cs.Sel;
3573 uStackFrame.pu16[5] = pCtx->es.Sel;
3574 uStackFrame.pu16[6] = pCtx->ds.Sel;
3575 uStackFrame.pu16[7] = pCtx->fs.Sel;
3576 uStackFrame.pu16[8] = pCtx->gs.Sel;
3577 }
3578 }
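        /* The new stack now holds, from the new [E]SP upwards: the optional
           error code, the return [E]IP, CS, [E]FLAGS, the old [E]SP and SS,
           plus ES, DS, FS and GS when interrupting V8086 code; so the handler
           finds the error code (if any) at the top of its stack. */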
3579 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3580 if (rcStrict != VINF_SUCCESS)
3581 return rcStrict;
3582
3583 /* Mark the selectors 'accessed' (hope this is the correct time). */
3584        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3585 * after pushing the stack frame? (Write protect the gdt + stack to
3586 * find out.) */
3587 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3588 {
3589 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3590 if (rcStrict != VINF_SUCCESS)
3591 return rcStrict;
3592 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3593 }
3594
3595 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3596 {
3597 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3598 if (rcStrict != VINF_SUCCESS)
3599 return rcStrict;
3600 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3601 }
3602
3603 /*
3604         * Start committing the register changes (joins with the DPL=CPL branch).
3605 */
3606 pCtx->ss.Sel = NewSS;
3607 pCtx->ss.ValidSel = NewSS;
3608 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3609 pCtx->ss.u32Limit = cbLimitSS;
3610 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3611 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3612 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3613 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3614 * SP is loaded).
3615 * Need to check the other combinations too:
3616 * - 16-bit TSS, 32-bit handler
3617 * - 32-bit TSS, 16-bit handler */
3618 if (!pCtx->ss.Attr.n.u1DefBig)
3619 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
3620 else
3621 pCtx->rsp = uNewEsp - cbStackFrame;
3622 pIemCpu->uCpl = uNewCpl;
3623
3624 if (fEfl & X86_EFL_VM)
3625 {
3626 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3627 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3628 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3629 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3630 }
3631 }
3632 /*
3633 * Same privilege, no stack change and smaller stack frame.
3634 */
3635 else
3636 {
3637 uint64_t uNewRsp;
3638 RTPTRUNION uStackFrame;
3639 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
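        /* I.e. 12 or 16 bytes (EIP, CS, EFLAGS [, error code]) for a 32-bit
           gate and 6 or 8 bytes for a 16-bit one; no SS:ESP is pushed since
           the stack does not change. */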
3640 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3641 if (rcStrict != VINF_SUCCESS)
3642 return rcStrict;
3643 void * const pvStackFrame = uStackFrame.pv;
3644
3645 if (f32BitGate)
3646 {
3647 if (fFlags & IEM_XCPT_FLAGS_ERR)
3648 *uStackFrame.pu32++ = uErr;
3649 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3650 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3651 uStackFrame.pu32[2] = fEfl;
3652 }
3653 else
3654 {
3655 if (fFlags & IEM_XCPT_FLAGS_ERR)
3656 *uStackFrame.pu16++ = uErr;
3657 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3658 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3659 uStackFrame.pu16[2] = fEfl;
3660 }
3661        rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the stack-push commit here, RSP is committed below */
3662 if (rcStrict != VINF_SUCCESS)
3663 return rcStrict;
3664
3665 /* Mark the CS selector as 'accessed'. */
3666 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3667 {
3668 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3669 if (rcStrict != VINF_SUCCESS)
3670 return rcStrict;
3671 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3672 }
3673
3674 /*
3675 * Start committing the register changes (joins with the other branch).
3676 */
3677 pCtx->rsp = uNewRsp;
3678 }
3679
3680 /* ... register committing continues. */
3681 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3682 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3683 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3684 pCtx->cs.u32Limit = cbLimitCS;
3685 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3686 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3687
3688 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3689 fEfl &= ~fEflToClear;
3690 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3691
3692 if (fFlags & IEM_XCPT_FLAGS_CR2)
3693 pCtx->cr2 = uCr2;
3694
3695 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3696 iemRaiseXcptAdjustState(pCtx, u8Vector);
3697
3698 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3699}
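
/*
 * Illustrative sketch only; iemExampleIdtXcptErrCode is hypothetical and not
 * part of IEM.  It shows how the IDT-vector style error code used by the
 * #GP/#NP raises above is put together: the vector goes into the selector
 * index field, the IDT bit is set, and the EXT bit is set when the event was
 * external to the program.
 */
#if 0
DECLINLINE(uint16_t) iemExampleIdtXcptErrCode(uint8_t u8Vector, bool fExternal)
{
    return X86_TRAP_ERR_IDT
         | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)
         | (fExternal ? X86_TRAP_ERR_EXTERNAL : 0);
}
#endif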
3700
3701
3702/**
3703 * Implements exceptions and interrupts for long mode.
3704 *
3705 * @returns VBox strict status code.
3706 * @param pIemCpu The IEM per CPU instance data.
3707 * @param pCtx The CPU context.
3708 * @param cbInstr The number of bytes to offset rIP by in the return
3709 * address.
3710 * @param u8Vector The interrupt / exception vector number.
3711 * @param fFlags The flags.
3712 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3713 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3714 */
3715IEM_STATIC VBOXSTRICTRC
3716iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3717 PCPUMCTX pCtx,
3718 uint8_t cbInstr,
3719 uint8_t u8Vector,
3720 uint32_t fFlags,
3721 uint16_t uErr,
3722 uint64_t uCr2)
3723{
3724 /*
3725 * Read the IDT entry.
3726 */
3727 uint16_t offIdt = (uint16_t)u8Vector << 4;
3728 if (pCtx->idtr.cbIdt < offIdt + 7)
3729 {
3730 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3731 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3732 }
3733 X86DESC64 Idte;
3734 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3735 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3736 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3737 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3738 return rcStrict;
3739 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3740 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3741 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3742
3743 /*
3744 * Check the descriptor type, DPL and such.
3745 * ASSUMES this is done in the same order as described for call-gate calls.
3746 */
3747 if (Idte.Gate.u1DescType)
3748 {
3749 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3750 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3751 }
3752 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3753 switch (Idte.Gate.u4Type)
3754 {
3755 case AMD64_SEL_TYPE_SYS_INT_GATE:
3756 fEflToClear |= X86_EFL_IF;
3757 break;
3758 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3759 break;
3760
3761 default:
3762 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3763 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3764 }
3765
3766 /* Check DPL against CPL if applicable. */
3767 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3768 {
3769 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3770 {
3771 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3772 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3773 }
3774 }
3775
3776 /* Is it there? */
3777 if (!Idte.Gate.u1Present)
3778 {
3779 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3780 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3781 }
3782
3783 /* A null CS is bad. */
3784 RTSEL NewCS = Idte.Gate.u16Sel;
3785 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3786 {
3787 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3788 return iemRaiseGeneralProtectionFault0(pIemCpu);
3789 }
3790
3791 /* Fetch the descriptor for the new CS. */
3792 IEMSELDESC DescCS;
3793 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3794 if (rcStrict != VINF_SUCCESS)
3795 {
3796 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3797 return rcStrict;
3798 }
3799
3800 /* Must be a 64-bit code segment. */
3801 if (!DescCS.Long.Gen.u1DescType)
3802 {
3803 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3804 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3805 }
3806 if ( !DescCS.Long.Gen.u1Long
3807 || DescCS.Long.Gen.u1DefBig
3808 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3809 {
3810 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3811 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3812 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3813 }
3814
3815 /* Don't allow lowering the privilege level. For non-conforming CS
3816 selectors, the CS.DPL sets the privilege level the trap/interrupt
3817 handler runs at. For conforming CS selectors, the CPL remains
3818 unchanged, but the CS.DPL must be <= CPL. */
3819 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3820 * when CPU in Ring-0. Result \#GP? */
3821 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3822 {
3823 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3824 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3825 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3826 }
3827
3828
3829 /* Make sure the selector is present. */
3830 if (!DescCS.Legacy.Gen.u1Present)
3831 {
3832 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3833 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3834 }
3835
3836 /* Check that the new RIP is canonical. */
3837 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3838 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3839 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3840 if (!IEM_IS_CANONICAL(uNewRip))
3841 {
3842 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3843 return iemRaiseGeneralProtectionFault0(pIemCpu);
3844 }
3845
3846 /*
3847 * If the privilege level changes or if the IST isn't zero, we need to get
3848 * a new stack from the TSS.
3849 */
3850 uint64_t uNewRsp;
3851 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3852 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3853 if ( uNewCpl != pIemCpu->uCpl
3854 || Idte.Gate.u3IST != 0)
3855 {
3856 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3857 if (rcStrict != VINF_SUCCESS)
3858 return rcStrict;
3859 }
3860 else
3861 uNewRsp = pCtx->rsp;
3862 uNewRsp &= ~(uint64_t)0xf;
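    /* Both Intel and AMD align the new RSP down to a 16-byte boundary before
       pushing the 64-bit stack frame; that is what the masking above does. */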
3863
3864 /*
3865 * Calc the flag image to push.
3866 */
3867 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3868 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3869 fEfl &= ~X86_EFL_RF;
3870 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3871 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3872
3873 /*
3874 * Start making changes.
3875 */
3876
3877 /* Create the stack frame. */
3878 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
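    /* I.e. 5 quadwords (RIP, CS, RFLAGS, RSP, SS) = 40 bytes, or 48 bytes when
       an error code is pushed as well. */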
3879 RTPTRUNION uStackFrame;
3880 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3881 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3882 if (rcStrict != VINF_SUCCESS)
3883 return rcStrict;
3884 void * const pvStackFrame = uStackFrame.pv;
3885
3886 if (fFlags & IEM_XCPT_FLAGS_ERR)
3887 *uStackFrame.pu64++ = uErr;
3888 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3889 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3890 uStackFrame.pu64[2] = fEfl;
3891 uStackFrame.pu64[3] = pCtx->rsp;
3892 uStackFrame.pu64[4] = pCtx->ss.Sel;
3893 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3894 if (rcStrict != VINF_SUCCESS)
3895 return rcStrict;
3896
3897    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3898    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3899 * after pushing the stack frame? (Write protect the gdt + stack to
3900 * find out.) */
3901 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3902 {
3903 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3904 if (rcStrict != VINF_SUCCESS)
3905 return rcStrict;
3906 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3907 }
3908
3909 /*
3910     * Start committing the register changes.
3911 */
3912    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3913 * hidden registers when interrupting 32-bit or 16-bit code! */
3914 if (uNewCpl != pIemCpu->uCpl)
3915 {
3916 pCtx->ss.Sel = 0 | uNewCpl;
3917 pCtx->ss.ValidSel = 0 | uNewCpl;
3918 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3919 pCtx->ss.u32Limit = UINT32_MAX;
3920 pCtx->ss.u64Base = 0;
3921 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3922 }
3923 pCtx->rsp = uNewRsp - cbStackFrame;
3924 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3925 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3926 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3927 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3928 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3929 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3930 pCtx->rip = uNewRip;
3931 pIemCpu->uCpl = uNewCpl;
3932
3933 fEfl &= ~fEflToClear;
3934 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3935
3936 if (fFlags & IEM_XCPT_FLAGS_CR2)
3937 pCtx->cr2 = uCr2;
3938
3939 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3940 iemRaiseXcptAdjustState(pCtx, u8Vector);
3941
3942 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3943}
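
/*
 * Illustrative sketch only; EXAMPLEAMD64XCPTFRAME is hypothetical and not used
 * by IEM.  It shows the 64-bit exception frame written above, viewed from the
 * new RSP upwards.
 */
#if 0
typedef struct EXAMPLEAMD64XCPTFRAME
{
    uint64_t    uErrCd;     /* Only present when IEM_XCPT_FLAGS_ERR is set. */
    uint64_t    uRip;       /* Return RIP. */
    uint64_t    uCs;        /* Return CS selector, zero extended. */
    uint64_t    uRFlags;    /* RFLAGS image. */
    uint64_t    uRsp;       /* Old RSP. */
    uint64_t    uSs;        /* Old SS selector, zero extended. */
} EXAMPLEAMD64XCPTFRAME;
#endif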
3944
3945
3946/**
3947 * Implements exceptions and interrupts.
3948 *
3949 * All exceptions and interrupts go through this function!
3950 *
3951 * @returns VBox strict status code.
3952 * @param pIemCpu The IEM per CPU instance data.
3953 * @param cbInstr The number of bytes to offset rIP by in the return
3954 * address.
3955 * @param u8Vector The interrupt / exception vector number.
3956 * @param fFlags The flags.
3957 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3958 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3959 */
3960DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3961iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3962 uint8_t cbInstr,
3963 uint8_t u8Vector,
3964 uint32_t fFlags,
3965 uint16_t uErr,
3966 uint64_t uCr2)
3967{
3968 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3969#ifdef IN_RING0
3970 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3971 AssertRCReturn(rc, rc);
3972#endif
3973
3974 /*
3975 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3976 */
3977 if ( pCtx->eflags.Bits.u1VM
3978 && pCtx->eflags.Bits.u2IOPL != 3
3979 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3980 && (pCtx->cr0 & X86_CR0_PE) )
3981 {
3982 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3983 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3984 u8Vector = X86_XCPT_GP;
3985 uErr = 0;
3986 }
3987#ifdef DBGFTRACE_ENABLED
3988 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3989 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3990 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3991#endif
3992
3993 /*
3994 * Do recursion accounting.
3995 */
3996 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3997 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3998 if (pIemCpu->cXcptRecursions == 0)
3999 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4000 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
4001 else
4002 {
4003 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4004 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
4005
4006        /** @todo double and triple faults. */
4007 if (pIemCpu->cXcptRecursions >= 3)
4008 {
4009#ifdef DEBUG_bird
4010 AssertFailed();
4011#endif
4012 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4013 }
4014
4015 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4016 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4017 {
4018 ....
4019 } */
4020 }
4021 pIemCpu->cXcptRecursions++;
4022 pIemCpu->uCurXcpt = u8Vector;
4023 pIemCpu->fCurXcpt = fFlags;
4024
4025 /*
4026 * Extensive logging.
4027 */
4028#if defined(LOG_ENABLED) && defined(IN_RING3)
4029 if (LogIs3Enabled())
4030 {
4031 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4032 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4033 char szRegs[4096];
4034 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4035 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4036 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4037 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4038 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4039 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4040 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4041 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4042 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4043 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4044 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4045 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4046 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4047 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4048 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4049 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4050 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4051 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4052 " efer=%016VR{efer}\n"
4053 " pat=%016VR{pat}\n"
4054 " sf_mask=%016VR{sf_mask}\n"
4055 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4056 " lstar=%016VR{lstar}\n"
4057 " star=%016VR{star} cstar=%016VR{cstar}\n"
4058 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4059 );
4060
4061 char szInstr[256];
4062 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4063 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4064 szInstr, sizeof(szInstr), NULL);
4065 Log3(("%s%s\n", szRegs, szInstr));
4066 }
4067#endif /* LOG_ENABLED */
4068
4069 /*
4070 * Call the mode specific worker function.
4071 */
4072 VBOXSTRICTRC rcStrict;
4073 if (!(pCtx->cr0 & X86_CR0_PE))
4074 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4075 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4076 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4077 else
4078 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4079
4080 /*
4081 * Unwind.
4082 */
4083 pIemCpu->cXcptRecursions--;
4084 pIemCpu->uCurXcpt = uPrevXcpt;
4085 pIemCpu->fCurXcpt = fPrevXcpt;
4086 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4087 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4088 return rcStrict;
4089}
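
/*
 * The iemRaiseXxx helpers below are thin convenience wrappers; they all funnel
 * into iemRaiseXcptOrInt() with the appropriate vector, flags and error code.
 */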
4090
4091
4092/** \#DE - 00. */
4093DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4094{
4095 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4096}
4097
4098
4099/** \#DB - 01.
4100 * @note This automatically clears DR7.GD. */
4101DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4102{
4103 /** @todo set/clear RF. */
4104 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4105 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4106}
4107
4108
4109/** \#UD - 06. */
4110DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4111{
4112 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4113}
4114
4115
4116/** \#NM - 07. */
4117DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4118{
4119 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4120}
4121
4122
4123/** \#TS(err) - 0a. */
4124DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4125{
4126 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4127}
4128
4129
4130/** \#TS(tr) - 0a. */
4131DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4132{
4133 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4134 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4135}
4136
4137
4138/** \#TS(0) - 0a. */
4139DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4140{
4141 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4142 0, 0);
4143}
4144
4145
4146/** \#TS(err) - 0a. */
4147DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4148{
4149 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4150 uSel & X86_SEL_MASK_OFF_RPL, 0);
4151}
4152
4153
4154/** \#NP(err) - 0b. */
4155DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4156{
4157 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4158}
4159
4160
4161/** \#NP(seg) - 0b. */
4162DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4163{
4164 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4165 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4166}
4167
4168
4169/** \#NP(sel) - 0b. */
4170DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4171{
4172 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4173 uSel & ~X86_SEL_RPL, 0);
4174}
4175
4176
4177/** \#SS(seg) - 0c. */
4178DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4179{
4180 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4181 uSel & ~X86_SEL_RPL, 0);
4182}
4183
4184
4185/** \#SS(err) - 0c. */
4186DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4187{
4188 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4189}
4190
4191
4192/** \#GP(n) - 0d. */
4193DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4194{
4195 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4196}
4197
4198
4199/** \#GP(0) - 0d. */
4200DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4201{
4202 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4203}
4204
4205
4206/** \#GP(sel) - 0d. */
4207DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4208{
4209 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4210 Sel & ~X86_SEL_RPL, 0);
4211}
4212
4213
4214/** \#GP(0) - 0d. */
4215DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4216{
4217 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4218}
4219
4220
4221/** \#GP(sel) - 0d. */
4222DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4223{
4224 NOREF(iSegReg); NOREF(fAccess);
4225 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4226 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4227}
4228
4229
4230/** \#GP(sel) - 0d. */
4231DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4232{
4233 NOREF(Sel);
4234 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4235}
4236
4237
4238/** \#GP(sel) - 0d. */
4239DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4240{
4241 NOREF(iSegReg); NOREF(fAccess);
4242 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4243}
4244
4245
4246/** \#PF(n) - 0e. */
4247DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4248{
4249 uint16_t uErr;
4250 switch (rc)
4251 {
4252 case VERR_PAGE_NOT_PRESENT:
4253 case VERR_PAGE_TABLE_NOT_PRESENT:
4254 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4255 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4256 uErr = 0;
4257 break;
4258
4259 default:
4260 AssertMsgFailed(("%Rrc\n", rc));
4261 case VERR_ACCESS_DENIED:
4262 uErr = X86_TRAP_PF_P;
4263 break;
4264
4265 /** @todo reserved */
4266 }
4267
4268 if (pIemCpu->uCpl == 3)
4269 uErr |= X86_TRAP_PF_US;
4270
4271 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4272 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4273 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4274 uErr |= X86_TRAP_PF_ID;
4275
4276#if 0 /* This is so much nonsense, really. Why was it done like that? */
4277 /* Note! RW access callers reporting a WRITE protection fault, will clear
4278 the READ flag before calling. So, read-modify-write accesses (RW)
4279 can safely be reported as READ faults. */
4280 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4281 uErr |= X86_TRAP_PF_RW;
4282#else
4283 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4284 {
4285 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4286 uErr |= X86_TRAP_PF_RW;
4287 }
4288#endif
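    /* Resulting error code bits: P (bit 0) = protection violation rather than
       not-present, RW (bit 1) = write access, US (bit 2) = CPL 3 access, and
       ID (bit 4) = instruction fetch under PAE/NX paging. */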
4289
4290 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4291 uErr, GCPtrWhere);
4292}
4293
4294
4295/** \#MF(0) - 10. */
4296DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4297{
4298 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4299}
4300
4301
4302/** \#AC(0) - 11. */
4303DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4304{
4305 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4306}
4307
4308
4309/**
4310 * Macro for calling iemCImplRaiseDivideError().
4311 *
4312 * This enables us to add/remove arguments and force different levels of
4313 * inlining as we wish.
4314 *
4315 * @return Strict VBox status code.
4316 */
4317#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4318IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4319{
4320 NOREF(cbInstr);
4321 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4322}
4323
4324
4325/**
4326 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4327 *
4328 * This enables us to add/remove arguments and force different levels of
4329 * inlining as we wish.
4330 *
4331 * @return Strict VBox status code.
4332 */
4333#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4334IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4335{
4336 NOREF(cbInstr);
4337 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4338}
4339
4340
4341/**
4342 * Macro for calling iemCImplRaiseInvalidOpcode().
4343 *
4344 * This enables us to add/remove arguments and force different levels of
4345 * inlining as we wish.
4346 *
4347 * @return Strict VBox status code.
4348 */
4349#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4350IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4351{
4352 NOREF(cbInstr);
4353 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4354}
4355
4356
4357/** @} */
4358
4359
4360/*
4361 *
4362 * Helper routines.
4363 * Helper routines.
4364 * Helper routines.
4365 *
4366 */
4367
4368/**
4369 * Recalculates the effective operand size.
4370 *
4371 * @param pIemCpu The IEM state.
4372 */
4373IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4374{
4375 switch (pIemCpu->enmCpuMode)
4376 {
4377 case IEMMODE_16BIT:
4378 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4379 break;
4380 case IEMMODE_32BIT:
4381 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4382 break;
4383 case IEMMODE_64BIT:
4384 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4385 {
4386 case 0:
4387 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4388 break;
4389 case IEM_OP_PRF_SIZE_OP:
4390 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4391 break;
4392 case IEM_OP_PRF_SIZE_REX_W:
4393 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4394 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4395 break;
4396 }
4397 break;
4398 default:
4399 AssertFailed();
4400 }
4401}
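
/*
 * For example: in 16-bit code an operand size prefix (0x66) gives 32-bit
 * operands and vice versa; in 64-bit code the default is normally 32-bit,
 * 0x66 selects 16-bit, and REX.W forces 64-bit and takes precedence over a
 * 0x66 prefix.
 */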
4402
4403
4404/**
4405 * Sets the default operand size to 64-bit and recalculates the effective
4406 * operand size.
4407 *
4408 * @param pIemCpu The IEM state.
4409 */
4410IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4411{
4412 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4413 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4414 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4415 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4416 else
4417 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4418}
4419
4420
4421/*
4422 *
4423 * Common opcode decoders.
4424 * Common opcode decoders.
4425 * Common opcode decoders.
4426 *
4427 */
4428//#include <iprt/mem.h>
4429
4430/**
4431 * Used to add extra details about a stub case.
4432 * @param pIemCpu The IEM per CPU state.
4433 */
4434IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4435{
4436#if defined(LOG_ENABLED) && defined(IN_RING3)
4437 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4438 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4439 char szRegs[4096];
4440 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4441 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4442 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4443 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4444 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4445 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4446 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4447 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4448 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4449 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4450 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4451 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4452 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4453 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4454 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4455 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4456 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4457 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4458 " efer=%016VR{efer}\n"
4459 " pat=%016VR{pat}\n"
4460 " sf_mask=%016VR{sf_mask}\n"
4461 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4462 " lstar=%016VR{lstar}\n"
4463 " star=%016VR{star} cstar=%016VR{cstar}\n"
4464 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4465 );
4466
4467 char szInstr[256];
4468 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4469 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4470 szInstr, sizeof(szInstr), NULL);
4471
4472 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4473#else
4474    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4475#endif
4476}
4477
4478/**
4479 * Complains about a stub.
4480 *
4481 * Providing two versions of this macro, one for daily use and one for use when
4482 * working on IEM.
4483 */
4484#if 0
4485# define IEMOP_BITCH_ABOUT_STUB() \
4486 do { \
4487 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4488 iemOpStubMsg2(pIemCpu); \
4489 RTAssertPanic(); \
4490 } while (0)
4491#else
4492# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4493#endif
4494
4495/** Stubs an opcode. */
4496#define FNIEMOP_STUB(a_Name) \
4497 FNIEMOP_DEF(a_Name) \
4498 { \
4499 IEMOP_BITCH_ABOUT_STUB(); \
4500 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4501 } \
4502 typedef int ignore_semicolon
4503
4504/** Stubs an opcode. */
4505#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4506 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4507 { \
4508 IEMOP_BITCH_ABOUT_STUB(); \
4509 NOREF(a_Name0); \
4510 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4511 } \
4512 typedef int ignore_semicolon
4513
4514/** Stubs an opcode which currently should raise \#UD. */
4515#define FNIEMOP_UD_STUB(a_Name) \
4516 FNIEMOP_DEF(a_Name) \
4517 { \
4518 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4519 return IEMOP_RAISE_INVALID_OPCODE(); \
4520 } \
4521 typedef int ignore_semicolon
4522
4523/** Stubs an opcode which currently should raise \#UD. */
4524#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4525 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4526 { \
4527 NOREF(a_Name0); \
4528 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4529 return IEMOP_RAISE_INVALID_OPCODE(); \
4530 } \
4531 typedef int ignore_semicolon
4532
4533
4534
4535/** @name Register Access.
4536 * @{
4537 */
4538
4539/**
4540 * Gets a reference (pointer) to the specified hidden segment register.
4541 *
4542 * @returns Hidden register reference.
4543 * @param pIemCpu The per CPU data.
4544 * @param iSegReg The segment register.
4545 */
4546IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4547{
4548 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4549 PCPUMSELREG pSReg;
4550 switch (iSegReg)
4551 {
4552 case X86_SREG_ES: pSReg = &pCtx->es; break;
4553 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4554 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4555 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4556 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4557 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4558 default:
4559 AssertFailedReturn(NULL);
4560 }
4561#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4562 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4563 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4564#else
4565 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4566#endif
4567 return pSReg;
4568}
4569
4570
4571/**
4572 * Gets a reference (pointer) to the specified segment register (the selector
4573 * value).
4574 *
4575 * @returns Pointer to the selector variable.
4576 * @param pIemCpu The per CPU data.
4577 * @param iSegReg The segment register.
4578 */
4579IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4580{
4581 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4582 switch (iSegReg)
4583 {
4584 case X86_SREG_ES: return &pCtx->es.Sel;
4585 case X86_SREG_CS: return &pCtx->cs.Sel;
4586 case X86_SREG_SS: return &pCtx->ss.Sel;
4587 case X86_SREG_DS: return &pCtx->ds.Sel;
4588 case X86_SREG_FS: return &pCtx->fs.Sel;
4589 case X86_SREG_GS: return &pCtx->gs.Sel;
4590 }
4591 AssertFailedReturn(NULL);
4592}
4593
4594
4595/**
4596 * Fetches the selector value of a segment register.
4597 *
4598 * @returns The selector value.
4599 * @param pIemCpu The per CPU data.
4600 * @param iSegReg The segment register.
4601 */
4602IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4603{
4604 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4605 switch (iSegReg)
4606 {
4607 case X86_SREG_ES: return pCtx->es.Sel;
4608 case X86_SREG_CS: return pCtx->cs.Sel;
4609 case X86_SREG_SS: return pCtx->ss.Sel;
4610 case X86_SREG_DS: return pCtx->ds.Sel;
4611 case X86_SREG_FS: return pCtx->fs.Sel;
4612 case X86_SREG_GS: return pCtx->gs.Sel;
4613 }
4614 AssertFailedReturn(0xffff);
4615}
4616
4617
4618/**
4619 * Gets a reference (pointer) to the specified general register.
4620 *
4621 * @returns Register reference.
4622 * @param pIemCpu The per CPU data.
4623 * @param iReg The general register.
4624 */
4625IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4626{
4627 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4628 switch (iReg)
4629 {
4630 case X86_GREG_xAX: return &pCtx->rax;
4631 case X86_GREG_xCX: return &pCtx->rcx;
4632 case X86_GREG_xDX: return &pCtx->rdx;
4633 case X86_GREG_xBX: return &pCtx->rbx;
4634 case X86_GREG_xSP: return &pCtx->rsp;
4635 case X86_GREG_xBP: return &pCtx->rbp;
4636 case X86_GREG_xSI: return &pCtx->rsi;
4637 case X86_GREG_xDI: return &pCtx->rdi;
4638 case X86_GREG_x8: return &pCtx->r8;
4639 case X86_GREG_x9: return &pCtx->r9;
4640 case X86_GREG_x10: return &pCtx->r10;
4641 case X86_GREG_x11: return &pCtx->r11;
4642 case X86_GREG_x12: return &pCtx->r12;
4643 case X86_GREG_x13: return &pCtx->r13;
4644 case X86_GREG_x14: return &pCtx->r14;
4645 case X86_GREG_x15: return &pCtx->r15;
4646 }
4647 AssertFailedReturn(NULL);
4648}
4649
4650
4651/**
4652 * Gets a reference (pointer) to the specified 8-bit general register.
4653 *
4654 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4655 *
4656 * @returns Register reference.
4657 * @param pIemCpu The per CPU data.
4658 * @param iReg The register.
4659 */
4660IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4661{
4662 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4663 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4664
4665 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4666 if (iReg >= 4)
4667 pu8Reg++;
4668 return pu8Reg;
4669}
4670
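/* Illustrative note (not from the original source): without a REX prefix,
   register encodings 4 thru 7 select the legacy high-byte registers AH, CH,
   DH and BH, i.e. byte 1 of rAX, rCX, rDX and rBX, while with any REX prefix
   the same encodings select SPL, BPL, SIL and DIL.  A hypothetical caller
   decoding "mov ah, al" without REX would thus get:
       uint8_t *pbDst = iemGRegRefU8(pIemCpu, 4);  // one byte into rAX (AH)
       uint8_t *pbSrc = iemGRegRefU8(pIemCpu, 0);  // low byte of rAX (AL)
 */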
4671
4672/**
4673 * Fetches the value of an 8-bit general register.
4674 *
4675 * @returns The register value.
4676 * @param pIemCpu The per CPU data.
4677 * @param iReg The register.
4678 */
4679IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4680{
4681 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4682 return *pbSrc;
4683}
4684
4685
4686/**
4687 * Fetches the value of a 16-bit general register.
4688 *
4689 * @returns The register value.
4690 * @param pIemCpu The per CPU data.
4691 * @param iReg The register.
4692 */
4693IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4694{
4695 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4696}
4697
4698
4699/**
4700 * Fetches the value of a 32-bit general register.
4701 *
4702 * @returns The register value.
4703 * @param pIemCpu The per CPU data.
4704 * @param iReg The register.
4705 */
4706IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4707{
4708 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4709}
4710
4711
4712/**
4713 * Fetches the value of a 64-bit general register.
4714 *
4715 * @returns The register value.
4716 * @param pIemCpu The per CPU data.
4717 * @param iReg The register.
4718 */
4719IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4720{
4721 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4722}
4723
4724
4725/**
4726 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4727 *
4728 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4729 * segment limit.
4730 *
4731 * @param pIemCpu The per CPU data.
4732 * @param offNextInstr The offset of the next instruction.
4733 */
4734IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4735{
4736 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4737 switch (pIemCpu->enmEffOpSize)
4738 {
4739 case IEMMODE_16BIT:
4740 {
4741 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4742 if ( uNewIp > pCtx->cs.u32Limit
4743 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4744 return iemRaiseGeneralProtectionFault0(pIemCpu);
4745 pCtx->rip = uNewIp;
4746 break;
4747 }
4748
4749 case IEMMODE_32BIT:
4750 {
4751 Assert(pCtx->rip <= UINT32_MAX);
4752 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4753
4754 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4755 if (uNewEip > pCtx->cs.u32Limit)
4756 return iemRaiseGeneralProtectionFault0(pIemCpu);
4757 pCtx->rip = uNewEip;
4758 break;
4759 }
4760
4761 case IEMMODE_64BIT:
4762 {
4763 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4764
4765 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4766 if (!IEM_IS_CANONICAL(uNewRip))
4767 return iemRaiseGeneralProtectionFault0(pIemCpu);
4768 pCtx->rip = uNewRip;
4769 break;
4770 }
4771
4772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4773 }
4774
4775 pCtx->eflags.Bits.u1RF = 0;
4776 return VINF_SUCCESS;
4777}
4778
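/* Worked example (illustrative only): at this point pCtx->ip still points at
   the start of the jump instruction and pIemCpu->offOpcode holds the number
   of bytes decoded, so a two byte "jmp short +5" fetched at IP 0x0100 yields
   0x0100 + 2 + 5 = 0x0107 as the new IP, provided it is within CS.limit. */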
4779
4780/**
4781 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4782 *
4783 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4784 * segment limit.
4785 *
4786 * @returns Strict VBox status code.
4787 * @param pIemCpu The per CPU data.
4788 * @param offNextInstr The offset of the next instruction.
4789 */
4790IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4791{
4792 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4793 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4794
4795 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4796 if ( uNewIp > pCtx->cs.u32Limit
4797 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4798 return iemRaiseGeneralProtectionFault0(pIemCpu);
4799 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4800 pCtx->rip = uNewIp;
4801 pCtx->eflags.Bits.u1RF = 0;
4802
4803 return VINF_SUCCESS;
4804}
4805
4806
4807/**
4808 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4809 *
4810 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4811 * segment limit.
4812 *
4813 * @returns Strict VBox status code.
4814 * @param pIemCpu The per CPU data.
4815 * @param offNextInstr The offset of the next instruction.
4816 */
4817IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4818{
4819 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4820 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4821
4822 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4823 {
4824 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4825
4826 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4827 if (uNewEip > pCtx->cs.u32Limit)
4828 return iemRaiseGeneralProtectionFault0(pIemCpu);
4829 pCtx->rip = uNewEip;
4830 }
4831 else
4832 {
4833 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4834
4835 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4836 if (!IEM_IS_CANONICAL(uNewRip))
4837 return iemRaiseGeneralProtectionFault0(pIemCpu);
4838 pCtx->rip = uNewRip;
4839 }
4840 pCtx->eflags.Bits.u1RF = 0;
4841 return VINF_SUCCESS;
4842}
4843
4844
4845/**
4846 * Performs a near jump to the specified address.
4847 *
4848 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4849 * segment limit.
4850 *
4851 * @param pIemCpu The per CPU data.
4852 * @param uNewRip The new RIP value.
4853 */
4854IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4855{
4856 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4857 switch (pIemCpu->enmEffOpSize)
4858 {
4859 case IEMMODE_16BIT:
4860 {
4861 Assert(uNewRip <= UINT16_MAX);
4862 if ( uNewRip > pCtx->cs.u32Limit
4863 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4864 return iemRaiseGeneralProtectionFault0(pIemCpu);
4865 /** @todo Test 16-bit jump in 64-bit mode. */
4866 pCtx->rip = uNewRip;
4867 break;
4868 }
4869
4870 case IEMMODE_32BIT:
4871 {
4872 Assert(uNewRip <= UINT32_MAX);
4873 Assert(pCtx->rip <= UINT32_MAX);
4874 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4875
4876 if (uNewRip > pCtx->cs.u32Limit)
4877 return iemRaiseGeneralProtectionFault0(pIemCpu);
4878 pCtx->rip = uNewRip;
4879 break;
4880 }
4881
4882 case IEMMODE_64BIT:
4883 {
4884 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4885
4886 if (!IEM_IS_CANONICAL(uNewRip))
4887 return iemRaiseGeneralProtectionFault0(pIemCpu);
4888 pCtx->rip = uNewRip;
4889 break;
4890 }
4891
4892 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4893 }
4894
4895 pCtx->eflags.Bits.u1RF = 0;
4896 return VINF_SUCCESS;
4897}
4898
4899
4900/**
4901 * Get the address of the top of the stack.
4902 *
4903 * @param pIemCpu The per CPU data.
4904 * @param pCtx The CPU context from which SP/ESP/RSP should be
4905 * read.
4906 */
4907DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4908{
4909 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4910 return pCtx->rsp;
4911 if (pCtx->ss.Attr.n.u1DefBig)
4912 return pCtx->esp;
4913 return pCtx->sp;
4914}
4915
4916
4917/**
4918 * Updates the RIP/EIP/IP to point to the next instruction.
4919 *
4920 * This function leaves the EFLAGS.RF flag alone.
4921 *
4922 * @param pIemCpu The per CPU data.
4923 * @param cbInstr The number of bytes to add.
4924 */
4925IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4926{
4927 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4928 switch (pIemCpu->enmCpuMode)
4929 {
4930 case IEMMODE_16BIT:
4931 Assert(pCtx->rip <= UINT16_MAX);
4932 pCtx->eip += cbInstr;
4933 pCtx->eip &= UINT32_C(0xffff);
4934 break;
4935
4936 case IEMMODE_32BIT:
4937 pCtx->eip += cbInstr;
4938 Assert(pCtx->rip <= UINT32_MAX);
4939 break;
4940
4941 case IEMMODE_64BIT:
4942 pCtx->rip += cbInstr;
4943 break;
4944 default: AssertFailed();
4945 }
4946}
4947
4948
4949#if 0
4950/**
4951 * Updates the RIP/EIP/IP to point to the next instruction.
4952 *
4953 * @param pIemCpu The per CPU data.
4954 */
4955IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4956{
4957 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4958}
4959#endif
4960
4961
4962
4963/**
4964 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4965 *
4966 * @param pIemCpu The per CPU data.
4967 * @param cbInstr The number of bytes to add.
4968 */
4969IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4970{
4971 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4972
4973 pCtx->eflags.Bits.u1RF = 0;
4974
4975 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4976 switch (pIemCpu->enmCpuMode)
4977 {
4978 /** @todo investigate if EIP or RIP is really incremented. */
4979 case IEMMODE_16BIT:
4980 case IEMMODE_32BIT:
4981 pCtx->eip += cbInstr;
4982 Assert(pCtx->rip <= UINT32_MAX);
4983 break;
4984
4985 case IEMMODE_64BIT:
4986 pCtx->rip += cbInstr;
4987 break;
4988 default: AssertFailed();
4989 }
4990}
4991
4992
4993/**
4994 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4995 *
4996 * @param pIemCpu The per CPU data.
4997 */
4998IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4999{
5000 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
5001}
5002
5003
5004/**
5005 * Adds to the stack pointer.
5006 *
5007 * @param pIemCpu The per CPU data.
5008 * @param pCtx The CPU context in which SP/ESP/RSP should be
5009 * updated.
5010 * @param cbToAdd The number of bytes to add.
5011 */
5012DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
5013{
5014 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5015 pCtx->rsp += cbToAdd;
5016 else if (pCtx->ss.Attr.n.u1DefBig)
5017 pCtx->esp += cbToAdd;
5018 else
5019 pCtx->sp += cbToAdd;
5020}
5021
5022
5023/**
5024 * Subtracts from the stack pointer.
5025 *
5026 * @param pIemCpu The per CPU data.
5027 * @param pCtx The CPU context in which SP/ESP/RSP should be
5028 * updated.
5029 * @param cbToSub The number of bytes to subtract.
5030 */
5031DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
5032{
5033 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5034 pCtx->rsp -= cbToSub;
5035 else if (pCtx->ss.Attr.n.u1DefBig)
5036 pCtx->esp -= cbToSub;
5037 else
5038 pCtx->sp -= cbToSub;
5039}
5040
5041
5042/**
5043 * Adds to the temporary stack pointer.
5044 *
5045 * @param pIemCpu The per CPU data.
5046 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5047 * @param cbToAdd The number of bytes to add.
5048 * @param pCtx Where to get the current stack mode.
5049 */
5050DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
5051{
5052 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5053 pTmpRsp->u += cbToAdd;
5054 else if (pCtx->ss.Attr.n.u1DefBig)
5055 pTmpRsp->DWords.dw0 += cbToAdd;
5056 else
5057 pTmpRsp->Words.w0 += cbToAdd;
5058}
5059
5060
5061/**
5062 * Subtracts from the temporary stack pointer.
5063 *
5064 * @param pIemCpu The per CPU data.
5065 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5066 * @param cbToSub The number of bytes to subtract.
5067 * @param pCtx Where to get the current stack mode.
5068 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5069 * expecting that.
5070 */
5071DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5072{
5073 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5074 pTmpRsp->u -= cbToSub;
5075 else if (pCtx->ss.Attr.n.u1DefBig)
5076 pTmpRsp->DWords.dw0 -= cbToSub;
5077 else
5078 pTmpRsp->Words.w0 -= cbToSub;
5079}
5080
5081
5082/**
5083 * Calculates the effective stack address for a push of the specified size as
5084 * well as the new RSP value (upper bits may be masked).
5085 *
5086 * @returns Effective stack address for the push.
5087 * @param pIemCpu The IEM per CPU data.
5088 * @param pCtx Where to get the current stack mode.
5089 * @param cbItem The size of the stack item to push.
5090 * @param puNewRsp Where to return the new RSP value.
5091 */
5092DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5093{
5094 RTUINT64U uTmpRsp;
5095 RTGCPTR GCPtrTop;
5096 uTmpRsp.u = pCtx->rsp;
5097
5098 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5099 GCPtrTop = uTmpRsp.u -= cbItem;
5100 else if (pCtx->ss.Attr.n.u1DefBig)
5101 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5102 else
5103 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5104 *puNewRsp = uTmpRsp.u;
5105 return GCPtrTop;
5106}
5107
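/* Illustrative example (not from the original source): on a 16-bit stack
   (SS.B=0) with SP=0x0002, pushing a 4 byte item wraps the low word, so the
   effective address returned is SS:0xFFFE, and *puNewRsp keeps the upper 48
   bits of RSP unchanged with the low word set to 0xFFFE.  Only in 64-bit
   mode is the full RSP used and returned. */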
5108
5109/**
5110 * Gets the current stack pointer and calculates the value after a pop of the
5111 * specified size.
5112 *
5113 * @returns Current stack pointer.
5114 * @param pIemCpu The per CPU data.
5115 * @param pCtx Where to get the current stack mode.
5116 * @param cbItem The size of the stack item to pop.
5117 * @param puNewRsp Where to return the new RSP value.
5118 */
5119DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5120{
5121 RTUINT64U uTmpRsp;
5122 RTGCPTR GCPtrTop;
5123 uTmpRsp.u = pCtx->rsp;
5124
5125 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5126 {
5127 GCPtrTop = uTmpRsp.u;
5128 uTmpRsp.u += cbItem;
5129 }
5130 else if (pCtx->ss.Attr.n.u1DefBig)
5131 {
5132 GCPtrTop = uTmpRsp.DWords.dw0;
5133 uTmpRsp.DWords.dw0 += cbItem;
5134 }
5135 else
5136 {
5137 GCPtrTop = uTmpRsp.Words.w0;
5138 uTmpRsp.Words.w0 += cbItem;
5139 }
5140 *puNewRsp = uTmpRsp.u;
5141 return GCPtrTop;
5142}
5143
5144
5145/**
5146 * Calculates the effective stack address for a push of the specified size as
5147 * well as the new temporary RSP value (upper bits may be masked).
5148 *
5149 * @returns Effective stack address for the push.
5150 * @param pIemCpu The per CPU data.
5151 * @param pCtx Where to get the current stack mode.
5152 * @param pTmpRsp The temporary stack pointer. This is updated.
5153 * @param cbItem The size of the stack item to push.
5154 */
5155DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5156{
5157 RTGCPTR GCPtrTop;
5158
5159 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5160 GCPtrTop = pTmpRsp->u -= cbItem;
5161 else if (pCtx->ss.Attr.n.u1DefBig)
5162 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5163 else
5164 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5165 return GCPtrTop;
5166}
5167
5168
5169/**
5170 * Gets the effective stack address for a pop of the specified size and
5171 * calculates and updates the temporary RSP.
5172 *
5173 * @returns Current stack pointer.
5174 * @param pIemCpu The per CPU data.
5175 * @param pCtx Where to get the current stack mode.
5176 * @param pTmpRsp The temporary stack pointer. This is updated.
5177 * @param cbItem The size of the stack item to pop.
5178 */
5179DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5180{
5181 RTGCPTR GCPtrTop;
5182 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5183 {
5184 GCPtrTop = pTmpRsp->u;
5185 pTmpRsp->u += cbItem;
5186 }
5187 else if (pCtx->ss.Attr.n.u1DefBig)
5188 {
5189 GCPtrTop = pTmpRsp->DWords.dw0;
5190 pTmpRsp->DWords.dw0 += cbItem;
5191 }
5192 else
5193 {
5194 GCPtrTop = pTmpRsp->Words.w0;
5195 pTmpRsp->Words.w0 += cbItem;
5196 }
5197 return GCPtrTop;
5198}
5199
5200/** @} */
5201
5202
5203/** @name FPU access and helpers.
5204 *
5205 * @{
5206 */
5207
5208
5209/**
5210 * Hook for preparing to use the host FPU.
5211 *
5212 * This is necessary in ring-0 and raw-mode context.
5213 *
5214 * @param pIemCpu The IEM per CPU data.
5215 */
5216DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5217{
5218#ifdef IN_RING3
5219 NOREF(pIemCpu);
5220#else
5221/** @todo RZ: FIXME */
5222//# error "Implement me"
5223#endif
5224}
5225
5226
5227/**
5228 * Hook for preparing to use the host FPU for SSE instructions.
5229 *
5230 * This is necessary in ring-0 and raw-mode context.
5231 *
5232 * @param pIemCpu The IEM per CPU data.
5233 */
5234DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5235{
5236 iemFpuPrepareUsage(pIemCpu);
5237}
5238
5239
5240/**
5241 * Stores a QNaN value into a FPU register.
5242 *
5243 * @param pReg Pointer to the register.
5244 */
5245DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5246{
5247 pReg->au32[0] = UINT32_C(0x00000000);
5248 pReg->au32[1] = UINT32_C(0xc0000000);
5249 pReg->au16[4] = UINT16_C(0xffff);
5250}
5251
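/* Note (illustrative): the bit pattern stored above is the x87 "real
   indefinite" QNaN - sign=1, exponent=0x7fff, mantissa=0xc000000000000000 -
   i.e. the 80-bit value ffff:c0000000:00000000, the same value the FPU
   typically produces as a masked invalid-operation response. */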
5252
5253/**
5254 * Updates the FOP, FPU.CS and FPUIP registers.
5255 *
5256 * @param pIemCpu The IEM per CPU data.
5257 * @param pCtx The CPU context.
5258 * @param pFpuCtx The FPU context.
5259 */
5260DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5261{
5262 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5263 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5264 /** @todo x87.CS and FPUIP need to be kept separately. */
5265 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5266 {
5267 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5268 * happens in real mode here based on the fnsave and fnstenv images. */
5269 pFpuCtx->CS = 0;
5270 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5271 }
5272 else
5273 {
5274 pFpuCtx->CS = pCtx->cs.Sel;
5275 pFpuCtx->FPUIP = pCtx->rip;
5276 }
5277}
5278
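/* Note (illustrative, assuming offFpuOpcode indexes the ModR/M byte): FOP as
   built above is the 11-bit x87 opcode - bits 0..7 from the ModR/M byte and
   bits 8..10 from the low three bits of the ESC opcode byte (0xd8..0xdf).
   E.g. "fadd st0, st1" (d8 c1) would be recorded as FOP=0x0c1. */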
5279
5280/**
5281 * Updates the x87.DS and FPUDP registers.
5282 *
5283 * @param pIemCpu The IEM per CPU data.
5284 * @param pCtx The CPU context.
5285 * @param pFpuCtx The FPU context.
5286 * @param iEffSeg The effective segment register.
5287 * @param GCPtrEff The effective address relative to @a iEffSeg.
5288 */
5289DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5290{
5291 RTSEL sel;
5292 switch (iEffSeg)
5293 {
5294 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5295 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5296 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5297 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5298 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5299 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5300 default:
5301 AssertMsgFailed(("%d\n", iEffSeg));
5302 sel = pCtx->ds.Sel;
5303 }
5304 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5305 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5306 {
5307 pFpuCtx->DS = 0;
5308 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5309 }
5310 else
5311 {
5312 pFpuCtx->DS = sel;
5313 pFpuCtx->FPUDP = GCPtrEff;
5314 }
5315}
5316
5317
5318/**
5319 * Rotates the stack registers in the push direction.
5320 *
5321 * @param pFpuCtx The FPU context.
5322 * @remarks This is a complete waste of time, but fxsave stores the registers in
5323 * stack order.
5324 */
5325DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5326{
5327 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5328 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5329 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5330 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5331 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5332 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5333 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5334 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5335 pFpuCtx->aRegs[0].r80 = r80Tmp;
5336}
5337
5338
5339/**
5340 * Rotates the stack registers in the pop direction.
5341 *
5342 * @param pFpuCtx The FPU context.
5343 * @remarks This is a complete waste of time, but fxsave stores the registers in
5344 * stack order.
5345 */
5346DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5347{
5348 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5349 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5350 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5351 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5352 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5353 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5354 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5355 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5356 pFpuCtx->aRegs[7].r80 = r80Tmp;
5357}
5358
5359
5360/**
5361 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5362 * exception prevents it.
5363 *
5364 * @param pIemCpu The IEM per CPU data.
5365 * @param pResult The FPU operation result to push.
5366 * @param pFpuCtx The FPU context.
5367 */
5368IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5369{
5370 /* Update FSW and bail if there are pending exceptions afterwards. */
5371 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5372 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5373 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5374 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5375 {
5376 pFpuCtx->FSW = fFsw;
5377 return;
5378 }
5379
5380 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5381 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5382 {
5383 /* All is fine, push the actual value. */
5384 pFpuCtx->FTW |= RT_BIT(iNewTop);
5385 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5386 }
5387 else if (pFpuCtx->FCW & X86_FCW_IM)
5388 {
5389 /* Masked stack overflow, push QNaN. */
5390 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5391 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5392 }
5393 else
5394 {
5395 /* Raise stack overflow, don't push anything. */
5396 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5397 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5398 return;
5399 }
5400
5401 fFsw &= ~X86_FSW_TOP_MASK;
5402 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5403 pFpuCtx->FSW = fFsw;
5404
5405 iemFpuRotateStackPush(pFpuCtx);
5406}
5407
5408
5409/**
5410 * Stores a result in a FPU register and updates the FSW and FTW.
5411 *
5412 * @param pFpuCtx The FPU context.
5413 * @param pResult The result to store.
5414 * @param iStReg Which FPU register to store it in.
5415 */
5416IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5417{
5418 Assert(iStReg < 8);
5419 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5420 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5421 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5422 pFpuCtx->FTW |= RT_BIT(iReg);
5423 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5424}
5425
5426
5427/**
5428 * Only updates the FPU status word (FSW) with the result of the current
5429 * instruction.
5430 *
5431 * @param pFpuCtx The FPU context.
5432 * @param u16FSW The FSW output of the current instruction.
5433 */
5434IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5435{
5436 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5437 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5438}
5439
5440
5441/**
5442 * Pops one item off the FPU stack if no pending exception prevents it.
5443 *
5444 * @param pFpuCtx The FPU context.
5445 */
5446IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5447{
5448 /* Check pending exceptions. */
5449 uint16_t uFSW = pFpuCtx->FSW;
5450 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5451 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5452 return;
5453
5454 /* TOP++, i.e. one item less on the stack. */
5455 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5456 uFSW &= ~X86_FSW_TOP_MASK;
5457 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5458 pFpuCtx->FSW = uFSW;
5459
5460 /* Mark the previous ST0 as empty. */
5461 iOldTop >>= X86_FSW_TOP_SHIFT;
5462 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5463
5464 /* Rotate the registers. */
5465 iemFpuRotateStackPop(pFpuCtx);
5466}
5467
5468
5469/**
5470 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5471 *
5472 * @param pIemCpu The IEM per CPU data.
5473 * @param pResult The FPU operation result to push.
5474 */
5475IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5476{
5477 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5478 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5479 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5480 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5481}
5482
5483
5484/**
5485 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5486 * and sets FPUDP and FPUDS.
5487 *
5488 * @param pIemCpu The IEM per CPU data.
5489 * @param pResult The FPU operation result to push.
5490 * @param iEffSeg The effective segment register.
5491 * @param GCPtrEff The effective address relative to @a iEffSeg.
5492 */
5493IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5494{
5495 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5496 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5497 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5498 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5499 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5500}
5501
5502
5503/**
5504 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5505 * unless a pending exception prevents it.
5506 *
5507 * @param pIemCpu The IEM per CPU data.
5508 * @param pResult The FPU operation result to store and push.
5509 */
5510IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5511{
5512 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5513 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5514 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5515
5516 /* Update FSW and bail if there are pending exceptions afterwards. */
5517 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5518 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5519 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5520 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5521 {
5522 pFpuCtx->FSW = fFsw;
5523 return;
5524 }
5525
5526 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5527 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5528 {
5529 /* All is fine, push the actual value. */
5530 pFpuCtx->FTW |= RT_BIT(iNewTop);
5531 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5532 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5533 }
5534 else if (pFpuCtx->FCW & X86_FCW_IM)
5535 {
5536 /* Masked stack overflow, push QNaN. */
5537 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5538 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5539 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5540 }
5541 else
5542 {
5543 /* Raise stack overflow, don't push anything. */
5544 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5545 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5546 return;
5547 }
5548
5549 fFsw &= ~X86_FSW_TOP_MASK;
5550 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5551 pFpuCtx->FSW = fFsw;
5552
5553 iemFpuRotateStackPush(pFpuCtx);
5554}
5555
5556
5557/**
5558 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5559 * FOP.
5560 *
5561 * @param pIemCpu The IEM per CPU data.
5562 * @param pResult The result to store.
5563 * @param iStReg Which FPU register to store it in.
5564 */
5565IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5566{
5567 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5568 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5569 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5570 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5571}
5572
5573
5574/**
5575 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5576 * FOP, and then pops the stack.
5577 *
5578 * @param pIemCpu The IEM per CPU data.
5579 * @param pResult The result to store.
5580 * @param iStReg Which FPU register to store it in.
5581 */
5582IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5583{
5584 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5585 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5586 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5587 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5588 iemFpuMaybePopOne(pFpuCtx);
5589}
5590
5591
5592/**
5593 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5594 * FPUDP, and FPUDS.
5595 *
5596 * @param pIemCpu The IEM per CPU data.
5597 * @param pResult The result to store.
5598 * @param iStReg Which FPU register to store it in.
5599 * @param iEffSeg The effective memory operand selector register.
5600 * @param GCPtrEff The effective memory operand offset.
5601 */
5602IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5603 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5604{
5605 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5606 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5607 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5608 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5609 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5610}
5611
5612
5613/**
5614 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5615 * FPUDP, and FPUDS, and then pops the stack.
5616 *
5617 * @param pIemCpu The IEM per CPU data.
5618 * @param pResult The result to store.
5619 * @param iStReg Which FPU register to store it in.
5620 * @param iEffSeg The effective memory operand selector register.
5621 * @param GCPtrEff The effective memory operand offset.
5622 */
5623IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5624 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5625{
5626 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5627 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5628 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5629 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5630 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5631 iemFpuMaybePopOne(pFpuCtx);
5632}
5633
5634
5635/**
5636 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5637 *
5638 * @param pIemCpu The IEM per CPU data.
5639 */
5640IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5641{
5642 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5643 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5644 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5645}
5646
5647
5648/**
5649 * Marks the specified stack register as free (for FFREE).
5650 *
5651 * @param pIemCpu The IEM per CPU data.
5652 * @param iStReg The register to free.
5653 */
5654IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5655{
5656 Assert(iStReg < 8);
5657 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5658 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5659 pFpuCtx->FTW &= ~RT_BIT(iReg);
5660}
5661
5662
5663/**
5664 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5665 *
5666 * @param pIemCpu The IEM per CPU data.
5667 */
5668IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5669{
5670 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5671 uint16_t uFsw = pFpuCtx->FSW;
5672 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5673 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5674 uFsw &= ~X86_FSW_TOP_MASK;
5675 uFsw |= uTop;
5676 pFpuCtx->FSW = uFsw;
5677}
5678
5679
5680/**
5681 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5682 *
5683 * @param pIemCpu The IEM per CPU data.
5684 */
5685IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5686{
5687 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5688 uint16_t uFsw = pFpuCtx->FSW;
5689 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5690 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5691 uFsw &= ~X86_FSW_TOP_MASK;
5692 uFsw |= uTop;
5693 pFpuCtx->FSW = uFsw;
5694}
5695
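/* Note (illustrative): since TOP is a 3-bit field, the two helpers above
   increment and decrement it modulo 8 - adding 1 << X86_FSW_TOP_SHIFT wraps
   7 -> 0, and adding 7 << X86_FSW_TOP_SHIFT is the same as subtracting one,
   wrapping 0 -> 7, without needing a signed operation. */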
5696
5697/**
5698 * Updates the FSW, FOP, FPUIP, and FPUCS.
5699 *
5700 * @param pIemCpu The IEM per CPU data.
5701 * @param u16FSW The FSW from the current instruction.
5702 */
5703IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5704{
5705 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5706 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5707 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5708 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5709}
5710
5711
5712/**
5713 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5714 *
5715 * @param pIemCpu The IEM per CPU data.
5716 * @param u16FSW The FSW from the current instruction.
5717 */
5718IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5719{
5720 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5721 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5722 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5723 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5724 iemFpuMaybePopOne(pFpuCtx);
5725}
5726
5727
5728/**
5729 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5730 *
5731 * @param pIemCpu The IEM per CPU data.
5732 * @param u16FSW The FSW from the current instruction.
5733 * @param iEffSeg The effective memory operand selector register.
5734 * @param GCPtrEff The effective memory operand offset.
5735 */
5736IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5737{
5738 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5739 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5740 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5741 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5742 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5743}
5744
5745
5746/**
5747 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5748 *
5749 * @param pIemCpu The IEM per CPU data.
5750 * @param u16FSW The FSW from the current instruction.
5751 */
5752IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5753{
5754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5755 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5756 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5757 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5758 iemFpuMaybePopOne(pFpuCtx);
5759 iemFpuMaybePopOne(pFpuCtx);
5760}
5761
5762
5763/**
5764 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5765 *
5766 * @param pIemCpu The IEM per CPU data.
5767 * @param u16FSW The FSW from the current instruction.
5768 * @param iEffSeg The effective memory operand selector register.
5769 * @param GCPtrEff The effective memory operand offset.
5770 */
5771IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5772{
5773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5774 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5775 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5776 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5777 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5778 iemFpuMaybePopOne(pFpuCtx);
5779}
5780
5781
5782/**
5783 * Worker routine for raising an FPU stack underflow exception.
5784 *
5785 * @param pIemCpu The IEM per CPU data.
5786 * @param pFpuCtx The FPU context.
5787 * @param iStReg The stack register being accessed.
5788 */
5789IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5790{
5791 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5792 if (pFpuCtx->FCW & X86_FCW_IM)
5793 {
5794 /* Masked underflow. */
5795 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5796 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5797 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5798 if (iStReg != UINT8_MAX)
5799 {
5800 pFpuCtx->FTW |= RT_BIT(iReg);
5801 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5802 }
5803 }
5804 else
5805 {
5806 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5807 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5808 }
5809}
5810
5811
5812/**
5813 * Raises a FPU stack underflow exception.
5814 *
5815 * @param pIemCpu The IEM per CPU data.
5816 * @param iStReg The destination register that should be loaded
5817 * with QNaN if \#IS is not masked. Specify
5818 * UINT8_MAX if none (like for fcom).
5819 */
5820DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5821{
5822 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5823 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5824 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5825 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5826}
5827
5828
5829DECL_NO_INLINE(IEM_STATIC, void)
5830iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5831{
5832 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5833 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5834 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5835 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5836 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5837}
5838
5839
5840DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5841{
5842 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5843 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5844 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5845 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5846 iemFpuMaybePopOne(pFpuCtx);
5847}
5848
5849
5850DECL_NO_INLINE(IEM_STATIC, void)
5851iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5852{
5853 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5854 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5855 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5856 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5857 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5858 iemFpuMaybePopOne(pFpuCtx);
5859}
5860
5861
5862DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5863{
5864 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5865 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5866 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5867 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5868 iemFpuMaybePopOne(pFpuCtx);
5869 iemFpuMaybePopOne(pFpuCtx);
5870}
5871
5872
5873DECL_NO_INLINE(IEM_STATIC, void)
5874iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5875{
5876 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5877 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5878 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5879
5880 if (pFpuCtx->FCW & X86_FCW_IM)
5881 {
5882 /* Masked stack underflow - push QNaN. */
5883 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5884 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5885 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5886 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5887 pFpuCtx->FTW |= RT_BIT(iNewTop);
5888 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5889 iemFpuRotateStackPush(pFpuCtx);
5890 }
5891 else
5892 {
5893 /* Exception pending - don't change TOP or the register stack. */
5894 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5895 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5896 }
5897}
5898
5899
5900DECL_NO_INLINE(IEM_STATIC, void)
5901iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5902{
5903 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5904 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5905 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5906
5907 if (pFpuCtx->FCW & X86_FCW_IM)
5908 {
5909 /* Masked stack underflow - push QNaN. */
5910 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5911 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5912 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5913 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5914 pFpuCtx->FTW |= RT_BIT(iNewTop);
5915 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5916 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5917 iemFpuRotateStackPush(pFpuCtx);
5918 }
5919 else
5920 {
5921 /* Exception pending - don't change TOP or the register stack. */
5922 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5923 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5924 }
5925}
5926
5927
5928/**
5929 * Worker routine for raising an FPU stack overflow exception on a push.
5930 *
5931 * @param pFpuCtx The FPU context.
5932 */
5933IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5934{
5935 if (pFpuCtx->FCW & X86_FCW_IM)
5936 {
5937 /* Masked overflow. */
5938 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5939 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5940 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5941 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5942 pFpuCtx->FTW |= RT_BIT(iNewTop);
5943 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5944 iemFpuRotateStackPush(pFpuCtx);
5945 }
5946 else
5947 {
5948 /* Exception pending - don't change TOP or the register stack. */
5949 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5950 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5951 }
5952}
5953
5954
5955/**
5956 * Raises a FPU stack overflow exception on a push.
5957 *
5958 * @param pIemCpu The IEM per CPU data.
5959 */
5960DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5961{
5962 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5963 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5964 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5965 iemFpuStackPushOverflowOnly(pFpuCtx);
5966}
5967
5968
5969/**
5970 * Raises a FPU stack overflow exception on a push with a memory operand.
5971 *
5972 * @param pIemCpu The IEM per CPU data.
5973 * @param iEffSeg The effective memory operand selector register.
5974 * @param GCPtrEff The effective memory operand offset.
5975 */
5976DECL_NO_INLINE(IEM_STATIC, void)
5977iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5978{
5979 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5980 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5981 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5982 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5983 iemFpuStackPushOverflowOnly(pFpuCtx);
5984}
5985
5986
5987IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5988{
5989 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5990 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5991 if (pFpuCtx->FTW & RT_BIT(iReg))
5992 return VINF_SUCCESS;
5993 return VERR_NOT_FOUND;
5994}
5995
5996
5997IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5998{
5999 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6000 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6001 if (pFpuCtx->FTW & RT_BIT(iReg))
6002 {
6003 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
6004 return VINF_SUCCESS;
6005 }
6006 return VERR_NOT_FOUND;
6007}
6008
6009
6010IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
6011 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
6012{
6013 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6014 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6015 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6016 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6017 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6018 {
6019 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6020 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
6021 return VINF_SUCCESS;
6022 }
6023 return VERR_NOT_FOUND;
6024}
6025
6026
6027IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
6028{
6029 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6030 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6031 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6032 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6033 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6034 {
6035 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6036 return VINF_SUCCESS;
6037 }
6038 return VERR_NOT_FOUND;
6039}
6040
6041
6042/**
6043 * Updates the FPU exception status after FCW is changed.
6044 *
6045 * @param pFpuCtx The FPU context.
6046 */
6047IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
6048{
6049 uint16_t u16Fsw = pFpuCtx->FSW;
6050 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
6051 u16Fsw |= X86_FSW_ES | X86_FSW_B;
6052 else
6053 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
6054 pFpuCtx->FSW = u16Fsw;
6055}
6056
6057
6058/**
6059 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6060 *
6061 * @returns The full FTW.
6062 * @param pFpuCtx The FPU context.
6063 */
6064IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6065{
6066 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6067 uint16_t u16Ftw = 0;
6068 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6069 for (unsigned iSt = 0; iSt < 8; iSt++)
6070 {
6071 unsigned const iReg = (iSt + iTop) & 7;
6072 if (!(u8Ftw & RT_BIT(iReg)))
6073 u16Ftw |= 3 << (iReg * 2); /* empty */
6074 else
6075 {
6076 uint16_t uTag;
6077 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6078 if (pr80Reg->s.uExponent == 0x7fff)
6079 uTag = 2; /* Exponent is all 1's => Special. */
6080 else if (pr80Reg->s.uExponent == 0x0000)
6081 {
6082 if (pr80Reg->s.u64Mantissa == 0x0000)
6083 uTag = 1; /* All bits are zero => Zero. */
6084 else
6085 uTag = 2; /* Must be special. */
6086 }
6087 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6088 uTag = 0; /* Valid. */
6089 else
6090 uTag = 2; /* Must be special. */
6091
6092 u16Ftw |= uTag << (iReg * 2);
6093 }
6094 }
6095
6096 return u16Ftw;
6097}
6098
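/* Note (illustrative): the 2-bit tags produced above follow the FSAVE/FSTENV
   encoding - 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal or
   unnormal), 11 = empty - with each pair stored by physical register number
   rather than by relative ST(i) position. */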
6099
6100/**
6101 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6102 *
6103 * @returns The compressed FTW.
6104 * @param u16FullFtw The full FTW to convert.
6105 */
6106IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6107{
6108 uint8_t u8Ftw = 0;
6109 for (unsigned i = 0; i < 8; i++)
6110 {
6111 if ((u16FullFtw & 3) != 3 /*empty*/)
6112 u8Ftw |= RT_BIT(i);
6113 u16FullFtw >>= 2;
6114 }
6115
6116 return u8Ftw;
6117}
6118
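/* Worked example (illustrative): a full FTW of 0xffff (all registers tagged
   empty) compresses to 0x00, while 0xfffc (register 0 tagged valid, the rest
   empty) compresses to 0x01 - each abbreviated bit simply records
   "not empty". */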
6119/** @} */
6120
6121
6122/** @name Memory access.
6123 *
6124 * @{
6125 */
6126
6127
6128/**
6129 * Updates the IEMCPU::cbWritten counter if applicable.
6130 *
6131 * @param pIemCpu The IEM per CPU data.
6132 * @param fAccess The access being accounted for.
6133 * @param cbMem The access size.
6134 */
6135DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6136{
6137 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6138 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6139 pIemCpu->cbWritten += (uint32_t)cbMem;
6140}
6141
6142
6143/**
6144 * Checks if the given segment can be written to, raising the appropriate
6145 * exception if not.
6146 *
6147 * @returns VBox strict status code.
6148 *
6149 * @param pIemCpu The IEM per CPU data.
6150 * @param pHid Pointer to the hidden register.
6151 * @param iSegReg The register number.
6152 * @param pu64BaseAddr Where to return the base address to use for the
6153 * segment. (In 64-bit code it may differ from the
6154 * base in the hidden segment.)
6155 */
6156IEM_STATIC VBOXSTRICTRC
6157iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6158{
6159 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6160 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6161 else
6162 {
6163 if (!pHid->Attr.n.u1Present)
6164 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6165
6166 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6167 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6168 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6169 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6170 *pu64BaseAddr = pHid->u64Base;
6171 }
6172 return VINF_SUCCESS;
6173}
6174
6175
6176/**
6177 * Checks if the given segment can be read from, raising the appropriate
6178 * exception if not.
6179 *
6180 * @returns VBox strict status code.
6181 *
6182 * @param pIemCpu The IEM per CPU data.
6183 * @param pHid Pointer to the hidden register.
6184 * @param iSegReg The register number.
6185 * @param pu64BaseAddr Where to return the base address to use for the
6186 * segment. (In 64-bit code it may differ from the
6187 * base in the hidden segment.)
6188 */
6189IEM_STATIC VBOXSTRICTRC
6190iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6191{
6192 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6193 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6194 else
6195 {
6196 if (!pHid->Attr.n.u1Present)
6197 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6198
6199 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6200 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6201 *pu64BaseAddr = pHid->u64Base;
6202 }
6203 return VINF_SUCCESS;
6204}
6205
6206
6207/**
6208 * Applies the segment limit, base and attributes.
6209 *
6210 * This may raise a \#GP or \#SS.
6211 *
6212 * @returns VBox strict status code.
6213 *
6214 * @param pIemCpu The IEM per CPU data.
6215 * @param fAccess The kind of access which is being performed.
6216 * @param iSegReg The index of the segment register to apply.
6217 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6218 * TSS, ++).
6219 * @param cbMem The access size.
6220 * @param pGCPtrMem Pointer to the guest memory address to apply
6221 * segmentation to. Input and output parameter.
6222 */
6223IEM_STATIC VBOXSTRICTRC
6224iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6225{
6226 if (iSegReg == UINT8_MAX)
6227 return VINF_SUCCESS;
6228
6229 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6230 switch (pIemCpu->enmCpuMode)
6231 {
6232 case IEMMODE_16BIT:
6233 case IEMMODE_32BIT:
6234 {
6235 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6236 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6237
6238 Assert(pSel->Attr.n.u1Present);
6239 Assert(pSel->Attr.n.u1DescType);
6240 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6241 {
6242 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6243 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6244 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6245
6246 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6247 {
6248 /** @todo CPL check. */
6249 }
6250
6251 /*
6252 * There are two kinds of data selectors, normal and expand down.
6253 */
6254 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6255 {
6256 if ( GCPtrFirst32 > pSel->u32Limit
6257 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6258 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6259 }
6260 else
6261 {
6262 /*
6263 * The upper boundary is defined by the B bit, not the G bit!
6264 */
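                    /* Illustrative note (not from the original source): for an
                       expand-down data segment the valid offsets are
                       (u32Limit, 0xffff] when B=0 and (u32Limit, 0xffffffff]
                       when B=1, which is what the check below enforces;
                       e.g. u32Limit=0x0fff with B=0 permits accesses at
                       0x1000..0xffff only. */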
6265 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6266 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6267 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6268 }
6269 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6270 }
6271 else
6272 {
6273
6274 /*
6275 * A code selector can usually be used to read through; writing is
6276 * only permitted in real and V8086 mode.
6277 */
6278 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6279 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6280 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6281 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6282 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6283
6284 if ( GCPtrFirst32 > pSel->u32Limit
6285 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6286 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6287
6288 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6289 {
6290 /** @todo CPL check. */
6291 }
6292
6293 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6294 }
6295 return VINF_SUCCESS;
6296 }
6297
6298 case IEMMODE_64BIT:
6299 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6300 *pGCPtrMem += pSel->u64Base;
6301 return VINF_SUCCESS;
6302
6303 default:
6304 AssertFailedReturn(VERR_IEM_IPE_7);
6305 }
6306}
6307
6308
6309/**
6310 * Translates a virtual address to a physical address and checks if we
6311 * can access the page as specified.
6312 *
6313 * @param pIemCpu The IEM per CPU data.
6314 * @param GCPtrMem The virtual address.
6315 * @param fAccess The intended access.
6316 * @param pGCPhysMem Where to return the physical address.
6317 */
6318IEM_STATIC VBOXSTRICTRC
6319iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6320{
6321 /** @todo Need a different PGM interface here. We're currently using
6322 * generic / REM interfaces. This won't cut it for R0 & RC. */
6323 RTGCPHYS GCPhys;
6324 uint64_t fFlags;
6325 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6326 if (RT_FAILURE(rc))
6327 {
6328 /** @todo Check unassigned memory in unpaged mode. */
6329 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6330 *pGCPhysMem = NIL_RTGCPHYS;
6331 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6332 }
6333
6334 /* If the page is writable, user accessible and does not have the no-exec bit
6335 set, all access is allowed. Otherwise we'll have to check more carefully... */
6336 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6337 {
6338 /* Write to read only memory? */
6339 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6340 && !(fFlags & X86_PTE_RW)
6341 && ( pIemCpu->uCpl != 0
6342 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6343 {
6344 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6345 *pGCPhysMem = NIL_RTGCPHYS;
6346 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6347 }
6348
6349 /* Kernel memory accessed by userland? */
6350 if ( !(fFlags & X86_PTE_US)
6351 && pIemCpu->uCpl == 3
6352 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6353 {
6354 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6355 *pGCPhysMem = NIL_RTGCPHYS;
6356 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6357 }
6358
6359 /* Executing non-executable memory? */
6360 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6361 && (fFlags & X86_PTE_PAE_NX)
6362 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6363 {
6364 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6365 *pGCPhysMem = NIL_RTGCPHYS;
6366 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6367 VERR_ACCESS_DENIED);
6368 }
6369 }
6370
6371 /*
6372 * Set the dirty / access flags.
6373 * ASSUMES this is set when the address is translated rather than on commit...
6374 */
6375 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6376 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6377 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6378 {
6379 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6380 AssertRC(rc2);
6381 }
6382
6383 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6384 *pGCPhysMem = GCPhys;
6385 return VINF_SUCCESS;
6386}
6387
6388
6389
6390/**
6391 * Maps a physical page.
6392 *
6393 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6394 * @param pIemCpu The IEM per CPU data.
6395 * @param GCPhysMem The physical address.
6396 * @param fAccess The intended access.
6397 * @param ppvMem Where to return the mapping address.
6398 * @param pLock The PGM lock.
6399 */
6400IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6401{
6402#ifdef IEM_VERIFICATION_MODE_FULL
6403 /* Force the alternative path so we can ignore writes. */
6404 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6405 {
6406 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6407 {
6408 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6409 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6410 if (RT_FAILURE(rc2))
6411 pIemCpu->fProblematicMemory = true;
6412 }
6413 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6414 }
6415#endif
6416#ifdef IEM_LOG_MEMORY_WRITES
6417 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6418 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6419#endif
6420#ifdef IEM_VERIFICATION_MODE_MINIMAL
6421 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6422#endif
6423
6424 /** @todo This API may require some improvement later. A private deal with PGM
6425 * regarding locking and unlocking needs to be struck. A couple of TLBs
6426 * living in PGM, but with publicly accessible inlined access methods
6427 * could perhaps be an even better solution. */
6428 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6429 GCPhysMem,
6430 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6431 pIemCpu->fBypassHandlers,
6432 ppvMem,
6433 pLock);
6434 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6435 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6436
6437#ifdef IEM_VERIFICATION_MODE_FULL
6438 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6439 pIemCpu->fProblematicMemory = true;
6440#endif
6441 return rc;
6442}
6443
6444
6445/**
6446 * Unmaps a page previously mapped by iemMemPageMap.
6447 *
6448 * @param pIemCpu The IEM per CPU data.
6449 * @param GCPhysMem The physical address.
6450 * @param fAccess The intended access.
6451 * @param pvMem What iemMemPageMap returned.
6452 * @param pLock The PGM lock.
6453 */
6454DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6455{
6456 NOREF(pIemCpu);
6457 NOREF(GCPhysMem);
6458 NOREF(fAccess);
6459 NOREF(pvMem);
6460 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6461}
6462
6463
6464/**
6465 * Looks up a memory mapping entry.
6466 *
6467 * @returns The mapping index (zero or positive) or VERR_NOT_FOUND (negative).
6468 * @param pIemCpu The IEM per CPU data.
6469 * @param pvMem The memory address.
6470 * @param fAccess The access type to look up.
6471 */
6472DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6473{
6474 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
6475 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6476 if ( pIemCpu->aMemMappings[0].pv == pvMem
6477 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6478 return 0;
6479 if ( pIemCpu->aMemMappings[1].pv == pvMem
6480 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6481 return 1;
6482 if ( pIemCpu->aMemMappings[2].pv == pvMem
6483 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6484 return 2;
6485 return VERR_NOT_FOUND;
6486}
6487
6488
6489/**
6490 * Finds a free memmap entry when using iNextMapping doesn't work.
6491 *
6492 * @returns Memory mapping index, 1024 on failure.
6493 * @param pIemCpu The IEM per CPU data.
6494 */
6495IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6496{
6497 /*
6498 * The easy case.
6499 */
6500 if (pIemCpu->cActiveMappings == 0)
6501 {
6502 pIemCpu->iNextMapping = 1;
6503 return 0;
6504 }
6505
6506 /* There should be enough mappings for all instructions. */
6507 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6508
6509 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6510 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6511 return i;
6512
6513 AssertFailedReturn(1024);
6514}
6515
6516
6517/**
6518 * Commits a bounce buffer that needs writing back and unmaps it.
6519 *
6520 * @returns Strict VBox status code.
6521 * @param pIemCpu The IEM per CPU data.
6522 * @param iMemMap The index of the buffer to commit.
6523 */
6524IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6525{
6526 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6527 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6528
6529 /*
6530 * Do the writing.
6531 */
6532#ifndef IEM_VERIFICATION_MODE_MINIMAL
6533 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6534 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6535 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6536 {
6537 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6538 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6539 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
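 /* The bounce buffer holds the whole access contiguously: the first cbFirst
    bytes go to GCPhysFirst and the remaining cbSecond bytes to GCPhysSecond. */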
6540 if (!pIemCpu->fBypassHandlers)
6541 {
6542 /*
6543 * Carefully and efficiently dealing with access handler return
6544 * codes makes this a little bloated.
6545 */
6546 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6547 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6548 pbBuf,
6549 cbFirst,
6550 PGMACCESSORIGIN_IEM);
6551 if (rcStrict == VINF_SUCCESS)
6552 {
6553 if (cbSecond)
6554 {
6555 rcStrict = PGMPhysWrite(pVM,
6556 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6557 pbBuf + cbFirst,
6558 cbSecond,
6559 PGMACCESSORIGIN_IEM);
6560 if (rcStrict == VINF_SUCCESS)
6561 { /* nothing */ }
6562 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6563 {
6564 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6565 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6566 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6567 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6568 }
6569 else
6570 {
6571 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6572 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6573 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6574 return rcStrict;
6575 }
6576 }
6577 }
6578 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6579 {
6580 if (!cbSecond)
6581 {
6582 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6583 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6584 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6585 }
6586 else
6587 {
6588 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6589 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6590 pbBuf + cbFirst,
6591 cbSecond,
6592 PGMACCESSORIGIN_IEM);
6593 if (rcStrict2 == VINF_SUCCESS)
6594 {
6595 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6596 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6597 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6598 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6599 }
6600 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6601 {
6602 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6603 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6604 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6605 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6606 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6607 }
6608 else
6609 {
6610 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6611 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6612 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6613 return rcStrict2;
6614 }
6615 }
6616 }
6617 else
6618 {
6619 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6620 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6621 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6622 return rcStrict;
6623 }
6624 }
6625 else
6626 {
6627 /*
6628 * No access handlers, much simpler.
6629 */
6630 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6631 if (RT_SUCCESS(rc))
6632 {
6633 if (cbSecond)
6634 {
6635 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6636 if (RT_SUCCESS(rc))
6637 { /* likely */ }
6638 else
6639 {
6640 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6641 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6642 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6643 return rc;
6644 }
6645 }
6646 }
6647 else
6648 {
6649 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6650 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6651 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6652 return rc;
6653 }
6654 }
6655 }
6656#endif
6657
6658#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6659 /*
6660 * Record the write(s).
6661 */
6662 if (!pIemCpu->fNoRem)
6663 {
6664 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6665 if (pEvtRec)
6666 {
6667 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6668 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6669 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6670 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6671 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6672 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6673 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6674 }
6675 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6676 {
6677 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6678 if (pEvtRec)
6679 {
6680 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6681 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6682 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6683 memcpy(pEvtRec->u.RamWrite.ab,
6684 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6685 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6686 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6687 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6688 }
6689 }
6690 }
6691#endif
6692#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6693 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6694 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6695 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6696 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6697 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6698 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6699
6700 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6701 g_cbIemWrote = cbWrote;
6702 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6703#endif
6704
6705 /*
6706 * Free the mapping entry.
6707 */
6708 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6709 Assert(pIemCpu->cActiveMappings != 0);
6710 pIemCpu->cActiveMappings--;
6711 return VINF_SUCCESS;
6712}
6713
6714
6715/**
6716 * iemMemMap worker that deals with a request crossing pages.
6717 */
6718IEM_STATIC VBOXSTRICTRC
6719iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6720{
6721 /*
6722 * Do the address translations.
6723 */
6724 RTGCPHYS GCPhysFirst;
6725 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6726 if (rcStrict != VINF_SUCCESS)
6727 return rcStrict;
6728
6729/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6730 * last byte. */
6731 RTGCPHYS GCPhysSecond;
6732 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6733 if (rcStrict != VINF_SUCCESS)
6734 return rcStrict;
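 /* The second translation was done on the last byte of the access; mask off the
    page offset so GCPhysSecond points at the start of the second page. */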
6735 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6736
6737 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6738#ifdef IEM_VERIFICATION_MODE_FULL
6739 /*
6740 * Detect problematic memory when verifying so we can select
6741 * the right execution engine. (TLB: Redo this.)
6742 */
6743 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6744 {
6745 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6746 if (RT_SUCCESS(rc2))
6747 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6748 if (RT_FAILURE(rc2))
6749 pIemCpu->fProblematicMemory = true;
6750 }
6751#endif
6752
6753
6754 /*
6755 * Read in the current memory content if it's a read, execute or partial
6756 * write access.
6757 */
6758 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6759 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6760 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
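 /* cbFirstPage + cbSecondPage equals cbMem; the access crosses exactly one page boundary. */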
6761
6762 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6763 {
6764 if (!pIemCpu->fBypassHandlers)
6765 {
6766 /*
6767 * Must carefully deal with access handler status codes here,
6768 * which makes the code a bit bloated.
6769 */
6770 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6771 if (rcStrict == VINF_SUCCESS)
6772 {
6773 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6774 if (rcStrict == VINF_SUCCESS)
6775 { /*likely */ }
6776 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6777 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6778 else
6779 {
6780 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6781 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6782 return rcStrict;
6783 }
6784 }
6785 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6786 {
6787 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6788 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6789 {
6790 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6791 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6792 }
6793 else
6794 {
6795 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6796 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6797 return rcStrict2;
6798 }
6799 }
6800 else
6801 {
6802 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6803 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6804 return rcStrict;
6805 }
6806 }
6807 else
6808 {
6809 /*
6810 * No informational status codes here, much more straightforward.
6811 */
6812 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6813 if (RT_SUCCESS(rc))
6814 {
6815 Assert(rc == VINF_SUCCESS);
6816 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6817 if (RT_SUCCESS(rc))
6818 Assert(rc == VINF_SUCCESS);
6819 else
6820 {
6821 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6822 return rc;
6823 }
6824 }
6825 else
6826 {
6827 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6828 return rc;
6829 }
6830 }
6831
6832#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6833 if ( !pIemCpu->fNoRem
6834 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6835 {
6836 /*
6837 * Record the reads.
6838 */
6839 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6840 if (pEvtRec)
6841 {
6842 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6843 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6844 pEvtRec->u.RamRead.cb = cbFirstPage;
6845 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6846 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6847 }
6848 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6849 if (pEvtRec)
6850 {
6851 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6852 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6853 pEvtRec->u.RamRead.cb = cbSecondPage;
6854 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6855 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6856 }
6857 }
6858#endif
6859 }
6860#ifdef VBOX_STRICT
6861 else
6862 memset(pbBuf, 0xcc, cbMem);
6863 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6864 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6865#endif
6866
6867 /*
6868 * Commit the bounce buffer entry.
6869 */
6870 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6871 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6872 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6873 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6874 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6875 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6876 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6877 pIemCpu->iNextMapping = iMemMap + 1;
6878 pIemCpu->cActiveMappings++;
6879
6880 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6881 *ppvMem = pbBuf;
6882 return VINF_SUCCESS;
6883}
6884
6885
6886/**
6887 * iemMemMap worker that deals with iemMemPageMap failures.
6888 */
6889IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6890 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6891{
6892 /*
6893 * Filter out conditions we can handle and the ones which shouldn't happen.
6894 */
6895 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6896 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6897 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6898 {
6899 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6900 return rcMap;
6901 }
6902 pIemCpu->cPotentialExits++;
6903
6904 /*
6905 * Read in the current memory content if it's a read, execute or partial
6906 * write access.
6907 */
6908 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6909 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6910 {
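 /* Unassigned physical memory is faked as reading all ones (0xff); anything
    else is read through PGM, with or without access handlers. */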
6911 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6912 memset(pbBuf, 0xff, cbMem);
6913 else
6914 {
6915 int rc;
6916 if (!pIemCpu->fBypassHandlers)
6917 {
6918 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6919 if (rcStrict == VINF_SUCCESS)
6920 { /* nothing */ }
6921 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6922 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6923 else
6924 {
6925 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6926 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6927 return rcStrict;
6928 }
6929 }
6930 else
6931 {
6932 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6933 if (RT_SUCCESS(rc))
6934 { /* likely */ }
6935 else
6936 {
6937 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6938 GCPhysFirst, rc));
6939 return rc;
6940 }
6941 }
6942 }
6943
6944#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6945 if ( !pIemCpu->fNoRem
6946 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6947 {
6948 /*
6949 * Record the read.
6950 */
6951 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6952 if (pEvtRec)
6953 {
6954 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6955 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6956 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6957 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6958 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6959 }
6960 }
6961#endif
6962 }
6963#ifdef VBOX_STRICT
6964 else
6965 memset(pbBuf, 0xcc, cbMem);
6966#endif
6967#ifdef VBOX_STRICT
6968 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6969 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6970#endif
6971
6972 /*
6973 * Commit the bounce buffer entry.
6974 */
6975 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6976 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6977 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6978 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6979 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6980 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6981 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6982 pIemCpu->iNextMapping = iMemMap + 1;
6983 pIemCpu->cActiveMappings++;
6984
6985 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6986 *ppvMem = pbBuf;
6987 return VINF_SUCCESS;
6988}
6989
6990
6991
6992/**
6993 * Maps the specified guest memory for the given kind of access.
6994 *
6995 * This may be using bounce buffering of the memory if it's crossing a page
6996 * boundary or if there is an access handler installed for any of it. Because
6997 * of lock prefix guarantees, we're in for some extra clutter when this
6998 * happens.
6999 *
7000 * This may raise a \#GP, \#SS, \#PF or \#AC.
7001 *
7002 * @returns VBox strict status code.
7003 *
7004 * @param pIemCpu The IEM per CPU data.
7005 * @param ppvMem Where to return the pointer to the mapped
7006 * memory.
7007 * @param cbMem The number of bytes to map. This is usually 1,
7008 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7009 * string operations it can be up to a page.
7010 * @param iSegReg The index of the segment register to use for
7011 * this access. The base and limits are checked.
7012 * Use UINT8_MAX to indicate that no segmentation
7013 * is required (for IDT, GDT and LDT accesses).
7014 * @param GCPtrMem The address of the guest memory.
7015 * @param fAccess How the memory is being accessed. The
7016 * IEM_ACCESS_TYPE_XXX bit is used to figure out
7017 * how to map the memory, while the
7018 * IEM_ACCESS_WHAT_XXX bit is used when raising
7019 * exceptions.
7020 */
7021IEM_STATIC VBOXSTRICTRC
7022iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
7023{
7024 /*
7025 * Check the input and figure out which mapping entry to use.
7026 */
7027 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7028 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
7029 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
7030
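 /* iNextMapping is only a hint; if it is out of range or the slot is still in
    use, fall back to searching for a free mapping entry. */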
7031 unsigned iMemMap = pIemCpu->iNextMapping;
7032 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
7033 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7034 {
7035 iMemMap = iemMemMapFindFree(pIemCpu);
7036 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
7037 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
7038 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
7039 pIemCpu->aMemMappings[2].fAccess),
7040 VERR_IEM_IPE_9);
7041 }
7042
7043 /*
7044 * Map the memory, checking that we can actually access it. If something
7045 * slightly complicated happens, fall back on bounce buffering.
7046 */
7047 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7048 if (rcStrict != VINF_SUCCESS)
7049 return rcStrict;
7050
7051 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
7052 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
7053
7054 RTGCPHYS GCPhysFirst;
7055 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
7056 if (rcStrict != VINF_SUCCESS)
7057 return rcStrict;
7058
7059 void *pvMem;
7060 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7061 if (rcStrict != VINF_SUCCESS)
7062 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7063
7064 /*
7065 * Fill in the mapping table entry.
7066 */
7067 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7068 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7069 pIemCpu->iNextMapping = iMemMap + 1;
7070 pIemCpu->cActiveMappings++;
7071
7072 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7073 *ppvMem = pvMem;
7074 return VINF_SUCCESS;
7075}
7076
7077
7078/**
7079 * Commits the guest memory if bounce buffered and unmaps it.
7080 *
7081 * @returns Strict VBox status code.
7082 * @param pIemCpu The IEM per CPU data.
7083 * @param pvMem The mapping.
7084 * @param fAccess The kind of access.
7085 */
7086IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7087{
7088 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7089 AssertReturn(iMemMap >= 0, iMemMap);
7090
7091 /* If it's bounce buffered, we may need to write back the buffer. */
7092 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7093 {
7094 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7095 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7096 }
7097 /* Otherwise unlock it. */
7098 else
7099 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7100
7101 /* Free the entry. */
7102 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7103 Assert(pIemCpu->cActiveMappings != 0);
7104 pIemCpu->cActiveMappings--;
7105 return VINF_SUCCESS;
7106}
7107
7108
7109/**
7110 * Rolls back mappings, releasing page locks and such.
7111 *
7112 * The caller shall only call this after checking cActiveMappings.
7113 *
7115 * @param pIemCpu The IEM per CPU data.
7116 */
7117IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7118{
7119 Assert(pIemCpu->cActiveMappings > 0);
7120
7121 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7122 while (iMemMap-- > 0)
7123 {
7124 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7125 if (fAccess != IEM_ACCESS_INVALID)
7126 {
7127 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7128 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7129 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7130 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7131 Assert(pIemCpu->cActiveMappings > 0);
7132 pIemCpu->cActiveMappings--;
7133 }
7134 }
7135}
7136
7137
7138/**
7139 * Fetches a data byte.
7140 *
7141 * @returns Strict VBox status code.
7142 * @param pIemCpu The IEM per CPU data.
7143 * @param pu8Dst Where to return the byte.
7144 * @param iSegReg The index of the segment register to use for
7145 * this access. The base and limits are checked.
7146 * @param GCPtrMem The address of the guest memory.
7147 */
7148IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7149{
7150 /* The lazy approach for now... */
7151 uint8_t const *pu8Src;
7152 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7153 if (rc == VINF_SUCCESS)
7154 {
7155 *pu8Dst = *pu8Src;
7156 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7157 }
7158 return rc;
7159}
7160
7161
7162/**
7163 * Fetches a data word.
7164 *
7165 * @returns Strict VBox status code.
7166 * @param pIemCpu The IEM per CPU data.
7167 * @param pu16Dst Where to return the word.
7168 * @param iSegReg The index of the segment register to use for
7169 * this access. The base and limits are checked.
7170 * @param GCPtrMem The address of the guest memory.
7171 */
7172IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7173{
7174 /* The lazy approach for now... */
7175 uint16_t const *pu16Src;
7176 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7177 if (rc == VINF_SUCCESS)
7178 {
7179 *pu16Dst = *pu16Src;
7180 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7181 }
7182 return rc;
7183}
7184
7185
7186/**
7187 * Fetches a data dword.
7188 *
7189 * @returns Strict VBox status code.
7190 * @param pIemCpu The IEM per CPU data.
7191 * @param pu32Dst Where to return the dword.
7192 * @param iSegReg The index of the segment register to use for
7193 * this access. The base and limits are checked.
7194 * @param GCPtrMem The address of the guest memory.
7195 */
7196IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7197{
7198 /* The lazy approach for now... */
7199 uint32_t const *pu32Src;
7200 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7201 if (rc == VINF_SUCCESS)
7202 {
7203 *pu32Dst = *pu32Src;
7204 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7205 }
7206 return rc;
7207}
7208
7209
7210#ifdef SOME_UNUSED_FUNCTION
7211/**
7212 * Fetches a data dword and sign extends it to a qword.
7213 *
7214 * @returns Strict VBox status code.
7215 * @param pIemCpu The IEM per CPU data.
7216 * @param pu64Dst Where to return the sign extended value.
7217 * @param iSegReg The index of the segment register to use for
7218 * this access. The base and limits are checked.
7219 * @param GCPtrMem The address of the guest memory.
7220 */
7221IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7222{
7223 /* The lazy approach for now... */
7224 int32_t const *pi32Src;
7225 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7226 if (rc == VINF_SUCCESS)
7227 {
7228 *pu64Dst = *pi32Src;
7229 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7230 }
7231#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7232 else
7233 *pu64Dst = 0;
7234#endif
7235 return rc;
7236}
7237#endif
7238
7239
7240/**
7241 * Fetches a data qword.
7242 *
7243 * @returns Strict VBox status code.
7244 * @param pIemCpu The IEM per CPU data.
7245 * @param pu64Dst Where to return the qword.
7246 * @param iSegReg The index of the segment register to use for
7247 * this access. The base and limits are checked.
7248 * @param GCPtrMem The address of the guest memory.
7249 */
7250IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7251{
7252 /* The lazy approach for now... */
7253 uint64_t const *pu64Src;
7254 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7255 if (rc == VINF_SUCCESS)
7256 {
7257 *pu64Dst = *pu64Src;
7258 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7259 }
7260 return rc;
7261}
7262
7263
7264/**
7265 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7266 *
7267 * @returns Strict VBox status code.
7268 * @param pIemCpu The IEM per CPU data.
7269 * @param pu64Dst Where to return the qword.
7270 * @param iSegReg The index of the segment register to use for
7271 * this access. The base and limits are checked.
7272 * @param GCPtrMem The address of the guest memory.
7273 */
7274IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7275{
7276 /* The lazy approach for now... */
7277 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7278 if (RT_UNLIKELY(GCPtrMem & 15))
7279 return iemRaiseGeneralProtectionFault0(pIemCpu);
7280
7281 uint64_t const *pu64Src;
7282 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7283 if (rc == VINF_SUCCESS)
7284 {
7285 *pu64Dst = *pu64Src;
7286 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7287 }
7288 return rc;
7289}
7290
7291
7292/**
7293 * Fetches a data tword.
7294 *
7295 * @returns Strict VBox status code.
7296 * @param pIemCpu The IEM per CPU data.
7297 * @param pr80Dst Where to return the tword.
7298 * @param iSegReg The index of the segment register to use for
7299 * this access. The base and limits are checked.
7300 * @param GCPtrMem The address of the guest memory.
7301 */
7302IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7303{
7304 /* The lazy approach for now... */
7305 PCRTFLOAT80U pr80Src;
7306 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7307 if (rc == VINF_SUCCESS)
7308 {
7309 *pr80Dst = *pr80Src;
7310 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7311 }
7312 return rc;
7313}
7314
7315
7316/**
7317 * Fetches a data dqword (double qword), generally SSE related.
7318 *
7319 * @returns Strict VBox status code.
7320 * @param pIemCpu The IEM per CPU data.
7321 * @param pu128Dst Where to return the dqword.
7322 * @param iSegReg The index of the segment register to use for
7323 * this access. The base and limits are checked.
7324 * @param GCPtrMem The address of the guest memory.
7325 */
7326IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7327{
7328 /* The lazy approach for now... */
7329 uint128_t const *pu128Src;
7330 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7331 if (rc == VINF_SUCCESS)
7332 {
7333 *pu128Dst = *pu128Src;
7334 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7335 }
7336 return rc;
7337}
7338
7339
7340/**
7341 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7342 * related.
7343 *
7344 * Raises \#GP(0) if not aligned.
7345 *
7346 * @returns Strict VBox status code.
7347 * @param pIemCpu The IEM per CPU data.
7348 * @param pu128Dst Where to return the dqword.
7349 * @param iSegReg The index of the segment register to use for
7350 * this access. The base and limits are checked.
7351 * @param GCPtrMem The address of the guest memory.
7352 */
7353IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7354{
7355 /* The lazy approach for now... */
7356 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7357 if ( (GCPtrMem & 15)
7358 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7359 return iemRaiseGeneralProtectionFault0(pIemCpu);
7360
7361 uint128_t const *pu128Src;
7362 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7363 if (rc == VINF_SUCCESS)
7364 {
7365 *pu128Dst = *pu128Src;
7366 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7367 }
7368 return rc;
7369}
7370
7371
7372
7373
7374/**
7375 * Fetches a descriptor register (lgdt, lidt).
7376 *
7377 * @returns Strict VBox status code.
7378 * @param pIemCpu The IEM per CPU data.
7379 * @param pcbLimit Where to return the limit.
7380 * @param pGCPtrBase Where to return the base.
7381 * @param iSegReg The index of the segment register to use for
7382 * this access. The base and limits are checked.
7383 * @param GCPtrMem The address of the guest memory.
7384 * @param enmOpSize The effective operand size.
7385 */
7386IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7387 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7388{
7389 uint8_t const *pu8Src;
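 /* Map the 2 limit bytes plus the base: 8 base bytes in 64-bit mode, 4 in
    32-bit mode, and only 3 in 16-bit mode where the base is 24 bits wide. */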
7390 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7391 (void **)&pu8Src,
7392 enmOpSize == IEMMODE_64BIT
7393 ? 2 + 8
7394 : enmOpSize == IEMMODE_32BIT
7395 ? 2 + 4
7396 : 2 + 3,
7397 iSegReg,
7398 GCPtrMem,
7399 IEM_ACCESS_DATA_R);
7400 if (rcStrict == VINF_SUCCESS)
7401 {
7402 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7403 switch (enmOpSize)
7404 {
7405 case IEMMODE_16BIT:
7406 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7407 break;
7408 case IEMMODE_32BIT:
7409 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7410 break;
7411 case IEMMODE_64BIT:
7412 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7413 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7414 break;
7415
7416 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7417 }
7418 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7419 }
7420 return rcStrict;
7421}
7422
7423
7424
7425/**
7426 * Stores a data byte.
7427 *
7428 * @returns Strict VBox status code.
7429 * @param pIemCpu The IEM per CPU data.
7430 * @param iSegReg The index of the segment register to use for
7431 * this access. The base and limits are checked.
7432 * @param GCPtrMem The address of the guest memory.
7433 * @param u8Value The value to store.
7434 */
7435IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7436{
7437 /* The lazy approach for now... */
7438 uint8_t *pu8Dst;
7439 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7440 if (rc == VINF_SUCCESS)
7441 {
7442 *pu8Dst = u8Value;
7443 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7444 }
7445 return rc;
7446}
7447
7448
7449/**
7450 * Stores a data word.
7451 *
7452 * @returns Strict VBox status code.
7453 * @param pIemCpu The IEM per CPU data.
7454 * @param iSegReg The index of the segment register to use for
7455 * this access. The base and limits are checked.
7456 * @param GCPtrMem The address of the guest memory.
7457 * @param u16Value The value to store.
7458 */
7459IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7460{
7461 /* The lazy approach for now... */
7462 uint16_t *pu16Dst;
7463 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7464 if (rc == VINF_SUCCESS)
7465 {
7466 *pu16Dst = u16Value;
7467 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7468 }
7469 return rc;
7470}
7471
7472
7473/**
7474 * Stores a data dword.
7475 *
7476 * @returns Strict VBox status code.
7477 * @param pIemCpu The IEM per CPU data.
7478 * @param iSegReg The index of the segment register to use for
7479 * this access. The base and limits are checked.
7480 * @param GCPtrMem The address of the guest memory.
7481 * @param u32Value The value to store.
7482 */
7483IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7484{
7485 /* The lazy approach for now... */
7486 uint32_t *pu32Dst;
7487 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7488 if (rc == VINF_SUCCESS)
7489 {
7490 *pu32Dst = u32Value;
7491 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7492 }
7493 return rc;
7494}
7495
7496
7497/**
7498 * Stores a data qword.
7499 *
7500 * @returns Strict VBox status code.
7501 * @param pIemCpu The IEM per CPU data.
7502 * @param iSegReg The index of the segment register to use for
7503 * this access. The base and limits are checked.
7504 * @param GCPtrMem The address of the guest memory.
7505 * @param u64Value The value to store.
7506 */
7507IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7508{
7509 /* The lazy approach for now... */
7510 uint64_t *pu64Dst;
7511 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7512 if (rc == VINF_SUCCESS)
7513 {
7514 *pu64Dst = u64Value;
7515 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7516 }
7517 return rc;
7518}
7519
7520
7521/**
7522 * Stores a data dqword.
7523 *
7524 * @returns Strict VBox status code.
7525 * @param pIemCpu The IEM per CPU data.
7526 * @param iSegReg The index of the segment register to use for
7527 * this access. The base and limits are checked.
7528 * @param GCPtrMem The address of the guest memory.
7529 * @param u128Value The value to store.
7530 */
7531IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7532{
7533 /* The lazy approach for now... */
7534 uint128_t *pu128Dst;
7535 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7536 if (rc == VINF_SUCCESS)
7537 {
7538 *pu128Dst = u128Value;
7539 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7540 }
7541 return rc;
7542}
7543
7544
7545/**
7546 * Stores a data dqword, SSE aligned.
7547 *
7548 * @returns Strict VBox status code.
7549 * @param pIemCpu The IEM per CPU data.
7550 * @param iSegReg The index of the segment register to use for
7551 * this access. The base and limits are checked.
7552 * @param GCPtrMem The address of the guest memory.
7553 * @param u128Value The value to store.
7554 */
7555IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7556{
7557 /* The lazy approach for now... */
7558 if ( (GCPtrMem & 15)
7559 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7560 return iemRaiseGeneralProtectionFault0(pIemCpu);
7561
7562 uint128_t *pu128Dst;
7563 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7564 if (rc == VINF_SUCCESS)
7565 {
7566 *pu128Dst = u128Value;
7567 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7568 }
7569 return rc;
7570}
7571
7572
7573/**
7574 * Stores a descriptor register (sgdt, sidt).
7575 *
7576 * @returns Strict VBox status code.
7577 * @param pIemCpu The IEM per CPU data.
7578 * @param cbLimit The limit.
7579 * @param GCPtrBase The base address.
7580 * @param iSegReg The index of the segment register to use for
7581 * this access. The base and limits are checked.
7582 * @param GCPtrMem The address of the guest memory.
7583 * @param enmOpSize The effective operand size.
7584 */
7585IEM_STATIC VBOXSTRICTRC
7586iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7587{
7588 uint8_t *pu8Src;
7589 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7590 (void **)&pu8Src,
7591 enmOpSize == IEMMODE_64BIT
7592 ? 2 + 8
7593 : enmOpSize == IEMMODE_32BIT
7594 ? 2 + 4
7595 : 2 + 3,
7596 iSegReg,
7597 GCPtrMem,
7598 IEM_ACCESS_DATA_W);
7599 if (rcStrict == VINF_SUCCESS)
7600 {
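 /* Bytes 0-1: the limit. Bytes 2+: the base - three base bytes plus a zero
    byte for 16-bit operand size, four bytes for 32-bit, eight for 64-bit. */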
7601 pu8Src[0] = RT_BYTE1(cbLimit);
7602 pu8Src[1] = RT_BYTE2(cbLimit);
7603 pu8Src[2] = RT_BYTE1(GCPtrBase);
7604 pu8Src[3] = RT_BYTE2(GCPtrBase);
7605 pu8Src[4] = RT_BYTE3(GCPtrBase);
7606 if (enmOpSize == IEMMODE_16BIT)
7607 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7608 else
7609 {
7610 pu8Src[5] = RT_BYTE4(GCPtrBase);
7611 if (enmOpSize == IEMMODE_64BIT)
7612 {
7613 pu8Src[6] = RT_BYTE5(GCPtrBase);
7614 pu8Src[7] = RT_BYTE6(GCPtrBase);
7615 pu8Src[8] = RT_BYTE7(GCPtrBase);
7616 pu8Src[9] = RT_BYTE8(GCPtrBase);
7617 }
7618 }
7619 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7620 }
7621 return rcStrict;
7622}
7623
7624
7625/**
7626 * Pushes a word onto the stack.
7627 *
7628 * @returns Strict VBox status code.
7629 * @param pIemCpu The IEM per CPU data.
7630 * @param u16Value The value to push.
7631 */
7632IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7633{
7634 /* Decrement the stack pointer. */
7635 uint64_t uNewRsp;
7636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7637 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7638
7639 /* Write the word the lazy way. */
7640 uint16_t *pu16Dst;
7641 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7642 if (rc == VINF_SUCCESS)
7643 {
7644 *pu16Dst = u16Value;
7645 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7646 }
7647
7648 /* Commit the new RSP value unless an access handler made trouble. */
7649 if (rc == VINF_SUCCESS)
7650 pCtx->rsp = uNewRsp;
7651
7652 return rc;
7653}
7654
7655
7656/**
7657 * Pushes a dword onto the stack.
7658 *
7659 * @returns Strict VBox status code.
7660 * @param pIemCpu The IEM per CPU data.
7661 * @param u32Value The value to push.
7662 */
7663IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7664{
7665 /* Decrement the stack pointer. */
7666 uint64_t uNewRsp;
7667 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7668 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7669
7670 /* Write the dword the lazy way. */
7671 uint32_t *pu32Dst;
7672 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7673 if (rc == VINF_SUCCESS)
7674 {
7675 *pu32Dst = u32Value;
7676 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7677 }
7678
7679 /* Commit the new RSP value unless an access handler made trouble. */
7680 if (rc == VINF_SUCCESS)
7681 pCtx->rsp = uNewRsp;
7682
7683 return rc;
7684}
7685
7686
7687/**
7688 * Pushes a dword segment register value onto the stack.
7689 *
7690 * @returns Strict VBox status code.
7691 * @param pIemCpu The IEM per CPU data.
7692 * @param u32Value The value to push.
7693 */
7694IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7695{
7696 /* Decrement the stack pointer. */
7697 uint64_t uNewRsp;
7698 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7699 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7700
7701 VBOXSTRICTRC rc;
7702 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7703 {
7704 /* The recompiler writes a full dword. */
7705 uint32_t *pu32Dst;
7706 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7707 if (rc == VINF_SUCCESS)
7708 {
7709 *pu32Dst = u32Value;
7710 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7711 }
7712 }
7713 else
7714 {
7715 /* The Intel docs talk about zero extending the selector register
7716 value. My actual Intel CPU here might be zero extending the value,
7717 but it still only writes the lower word... */
7718 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7719 * happens when crossing a page boundary: is the high word checked
7720 * for write accessibility or not? Probably it is. What about segment limits?
7721 * It appears this behavior is also shared with trap error codes.
7722 *
7723 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
7724 * ancient hardware when it actually did change. */
7725 uint16_t *pu16Dst;
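 /* Map a full dword read-write so the high word is preserved as-is while only
    the low word is overwritten with the selector value. */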
7726 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7727 if (rc == VINF_SUCCESS)
7728 {
7729 *pu16Dst = (uint16_t)u32Value;
7730 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7731 }
7732 }
7733
7734 /* Commit the new RSP value unless an access handler made trouble. */
7735 if (rc == VINF_SUCCESS)
7736 pCtx->rsp = uNewRsp;
7737
7738 return rc;
7739}
7740
7741
7742/**
7743 * Pushes a qword onto the stack.
7744 *
7745 * @returns Strict VBox status code.
7746 * @param pIemCpu The IEM per CPU data.
7747 * @param u64Value The value to push.
7748 */
7749IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7750{
7751 /* Decrement the stack pointer. */
7752 uint64_t uNewRsp;
7753 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7754 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7755
7756 /* Write the qword the lazy way. */
7757 uint64_t *pu64Dst;
7758 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7759 if (rc == VINF_SUCCESS)
7760 {
7761 *pu64Dst = u64Value;
7762 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7763 }
7764
7765 /* Commit the new RSP value unless an access handler made trouble. */
7766 if (rc == VINF_SUCCESS)
7767 pCtx->rsp = uNewRsp;
7768
7769 return rc;
7770}
7771
7772
7773/**
7774 * Pops a word from the stack.
7775 *
7776 * @returns Strict VBox status code.
7777 * @param pIemCpu The IEM per CPU data.
7778 * @param pu16Value Where to store the popped value.
7779 */
7780IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7781{
7782 /* Increment the stack pointer. */
7783 uint64_t uNewRsp;
7784 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7785 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7786
7787 /* Read the word the lazy way. */
7788 uint16_t const *pu16Src;
7789 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7790 if (rc == VINF_SUCCESS)
7791 {
7792 *pu16Value = *pu16Src;
7793 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7794
7795 /* Commit the new RSP value. */
7796 if (rc == VINF_SUCCESS)
7797 pCtx->rsp = uNewRsp;
7798 }
7799
7800 return rc;
7801}
7802
7803
7804/**
7805 * Pops a dword from the stack.
7806 *
7807 * @returns Strict VBox status code.
7808 * @param pIemCpu The IEM per CPU data.
7809 * @param pu32Value Where to store the popped value.
7810 */
7811IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7812{
7813 /* Increment the stack pointer. */
7814 uint64_t uNewRsp;
7815 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7816 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7817
7818 /* Read the dword the lazy way. */
7819 uint32_t const *pu32Src;
7820 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7821 if (rc == VINF_SUCCESS)
7822 {
7823 *pu32Value = *pu32Src;
7824 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7825
7826 /* Commit the new RSP value. */
7827 if (rc == VINF_SUCCESS)
7828 pCtx->rsp = uNewRsp;
7829 }
7830
7831 return rc;
7832}
7833
7834
7835/**
7836 * Pops a qword from the stack.
7837 *
7838 * @returns Strict VBox status code.
7839 * @param pIemCpu The IEM per CPU data.
7840 * @param pu64Value Where to store the popped value.
7841 */
7842IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7843{
7844 /* Increment the stack pointer. */
7845 uint64_t uNewRsp;
7846 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7847 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7848
7849 /* Read the qword the lazy way. */
7850 uint64_t const *pu64Src;
7851 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7852 if (rc == VINF_SUCCESS)
7853 {
7854 *pu64Value = *pu64Src;
7855 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7856
7857 /* Commit the new RSP value. */
7858 if (rc == VINF_SUCCESS)
7859 pCtx->rsp = uNewRsp;
7860 }
7861
7862 return rc;
7863}
7864
7865
7866/**
7867 * Pushes a word onto the stack, using a temporary stack pointer.
7868 *
7869 * @returns Strict VBox status code.
7870 * @param pIemCpu The IEM per CPU data.
7871 * @param u16Value The value to push.
7872 * @param pTmpRsp Pointer to the temporary stack pointer.
7873 */
7874IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7875{
7876 /* Decrement the stack pointer. */
7877 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7878 RTUINT64U NewRsp = *pTmpRsp;
7879 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7880
7881 /* Write the word the lazy way. */
7882 uint16_t *pu16Dst;
7883 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7884 if (rc == VINF_SUCCESS)
7885 {
7886 *pu16Dst = u16Value;
7887 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7888 }
7889
7890 /* Commit the new RSP value unless an access handler made trouble. */
7891 if (rc == VINF_SUCCESS)
7892 *pTmpRsp = NewRsp;
7893
7894 return rc;
7895}
7896
7897
7898/**
7899 * Pushes a dword onto the stack, using a temporary stack pointer.
7900 *
7901 * @returns Strict VBox status code.
7902 * @param pIemCpu The IEM per CPU data.
7903 * @param u32Value The value to push.
7904 * @param pTmpRsp Pointer to the temporary stack pointer.
7905 */
7906IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7907{
7908 /* Decrement the stack pointer. */
7909 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7910 RTUINT64U NewRsp = *pTmpRsp;
7911 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7912
7913 /* Write the dword the lazy way. */
7914 uint32_t *pu32Dst;
7915 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7916 if (rc == VINF_SUCCESS)
7917 {
7918 *pu32Dst = u32Value;
7919 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7920 }
7921
7922 /* Commit the new RSP value unless an access handler made trouble. */
7923 if (rc == VINF_SUCCESS)
7924 *pTmpRsp = NewRsp;
7925
7926 return rc;
7927}
7928
7929
7930/**
7931 * Pushes a qword onto the stack, using a temporary stack pointer.
7932 *
7933 * @returns Strict VBox status code.
7934 * @param pIemCpu The IEM per CPU data.
7935 * @param u64Value The value to push.
7936 * @param pTmpRsp Pointer to the temporary stack pointer.
7937 */
7938IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7939{
7940 /* Decrement the stack pointer. */
7941 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7942 RTUINT64U NewRsp = *pTmpRsp;
7943 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7944
7945 /* Write the qword the lazy way. */
7946 uint64_t *pu64Dst;
7947 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7948 if (rc == VINF_SUCCESS)
7949 {
7950 *pu64Dst = u64Value;
7951 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7952 }
7953
7954 /* Commit the new RSP value unless an access handler made trouble. */
7955 if (rc == VINF_SUCCESS)
7956 *pTmpRsp = NewRsp;
7957
7958 return rc;
7959}
7960
7961
7962/**
7963 * Pops a word from the stack, using a temporary stack pointer.
7964 *
7965 * @returns Strict VBox status code.
7966 * @param pIemCpu The IEM per CPU data.
7967 * @param pu16Value Where to store the popped value.
7968 * @param pTmpRsp Pointer to the temporary stack pointer.
7969 */
7970IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7971{
7972 /* Increment the stack pointer. */
7973 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7974 RTUINT64U NewRsp = *pTmpRsp;
7975 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7976
7977 /* Read the word the lazy way. */
7978 uint16_t const *pu16Src;
7979 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7980 if (rc == VINF_SUCCESS)
7981 {
7982 *pu16Value = *pu16Src;
7983 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7984
7985 /* Commit the new RSP value. */
7986 if (rc == VINF_SUCCESS)
7987 *pTmpRsp = NewRsp;
7988 }
7989
7990 return rc;
7991}
7992
7993
7994/**
7995 * Pops a dword from the stack, using a temporary stack pointer.
7996 *
7997 * @returns Strict VBox status code.
7998 * @param pIemCpu The IEM per CPU data.
7999 * @param pu32Value Where to store the popped value.
8000 * @param pTmpRsp Pointer to the temporary stack pointer.
8001 */
8002IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
8003{
8004 /* Increment the stack pointer. */
8005 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8006 RTUINT64U NewRsp = *pTmpRsp;
8007 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
8008
8009 /* Read the dword the lazy way. */
8010 uint32_t const *pu32Src;
8011 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8012 if (rc == VINF_SUCCESS)
8013 {
8014 *pu32Value = *pu32Src;
8015 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8016
8017 /* Commit the new RSP value. */
8018 if (rc == VINF_SUCCESS)
8019 *pTmpRsp = NewRsp;
8020 }
8021
8022 return rc;
8023}
8024
8025
8026/**
8027 * Pops a qword from the stack, using a temporary stack pointer.
8028 *
8029 * @returns Strict VBox status code.
8030 * @param pIemCpu The IEM per CPU data.
8031 * @param pu64Value Where to store the popped value.
8032 * @param pTmpRsp Pointer to the temporary stack pointer.
8033 */
8034IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
8035{
8036 /* Increment the stack pointer. */
8037 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8038 RTUINT64U NewRsp = *pTmpRsp;
8039 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8040
8041 /* Fetch the qword the lazy way. */
8042 uint64_t const *pu64Src;
8043 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8044 if (rcStrict == VINF_SUCCESS)
8045 {
8046 *pu64Value = *pu64Src;
8047 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8048
8049 /* Commit the new RSP value. */
8050 if (rcStrict == VINF_SUCCESS)
8051 *pTmpRsp = NewRsp;
8052 }
8053
8054 return rcStrict;
8055}
8056
8057
8058/**
8059 * Begin a special stack push (used by interrupts, exceptions and such).
8060 *
8061 * This will raise \#SS or \#PF if appropriate.
8062 *
8063 * @returns Strict VBox status code.
8064 * @param pIemCpu The IEM per CPU data.
8065 * @param cbMem The number of bytes to push onto the stack.
8066 * @param ppvMem Where to return the pointer to the stack memory.
8067 * As with the other memory functions this could be
8068 * direct access or bounce buffered access, so
8069 * don't commit any register changes until the
8070 * commit call succeeds.
8071 * @param puNewRsp Where to return the new RSP value. This must be
8072 * passed unchanged to
8073 * iemMemStackPushCommitSpecial().
8074 */
8075IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8076{
8077 Assert(cbMem < UINT8_MAX);
8078 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8079 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8080 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8081}
8082
8083
8084/**
8085 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8086 *
8087 * This will update the rSP.
8088 *
8089 * @returns Strict VBox status code.
8090 * @param pIemCpu The IEM per CPU data.
8091 * @param pvMem The pointer returned by
8092 * iemMemStackPushBeginSpecial().
8093 * @param uNewRsp The new RSP value returned by
8094 * iemMemStackPushBeginSpecial().
8095 */
8096IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8097{
8098 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8099 if (rcStrict == VINF_SUCCESS)
8100 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8101 return rcStrict;
8102}
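
/*
 * Illustrative only: a minimal, hypothetical caller of the special stack push
 * pair above. This is not taken from the actual exception delivery code; the
 * uEfl, uCs and uEip locals are made up for the example.
 *
 *     uint64_t     uNewRsp;
 *     uint32_t    *pu32Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 3 * sizeof(uint32_t),
 *                                                         (void **)&pu32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pu32Frame[2] = uEfl;
 *     pu32Frame[1] = uCs;
 *     pu32Frame[0] = uEip;
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu32Frame, uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */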
8103
8104
8105/**
8106 * Begin a special stack pop (used by iret, retf and such).
8107 *
8108 * This will raise \#SS or \#PF if appropriate.
8109 *
8110 * @returns Strict VBox status code.
8111 * @param pIemCpu The IEM per CPU data.
8112 * @param cbMem The number of bytes to pop off the stack.
8113 * @param ppvMem Where to return the pointer to the stack memory.
8114 * @param puNewRsp Where to return the new RSP value. This must be
8115 * passed unchanged to
8116 * iemMemStackPopCommitSpecial() or applied
8117 * manually if iemMemStackPopDoneSpecial() is used.
8118 */
8119IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8120{
8121 Assert(cbMem < UINT8_MAX);
8122 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8123 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8124 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8125}
8126
8127
8128/**
8129 * Continue a special stack pop (used by iret and retf).
8130 *
8131 * This will raise \#SS or \#PF if appropriate.
8132 *
8133 * @returns Strict VBox status code.
8134 * @param pIemCpu The IEM per CPU data.
8135 * @param cbMem The number of bytes to pop off the stack.
8136 * @param ppvMem Where to return the pointer to the stack memory.
8137 * @param puNewRsp Where to return the new RSP value. This must be
8138 * passed unchanged to
8139 * iemMemStackPopCommitSpecial() or applied
8140 * manually if iemMemStackPopDoneSpecial() is used.
8141 */
8142IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8143{
8144 Assert(cbMem < UINT8_MAX);
8145 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8146 RTUINT64U NewRsp;
8147 NewRsp.u = *puNewRsp;
8148 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8149 *puNewRsp = NewRsp.u;
8150 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8151}
8152
8153
8154/**
8155 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8156 *
8157 * This will update the rSP.
8158 *
8159 * @returns Strict VBox status code.
8160 * @param pIemCpu The IEM per CPU data.
8161 * @param pvMem The pointer returned by
8162 * iemMemStackPopBeginSpecial().
8163 * @param uNewRsp The new RSP value returned by
8164 * iemMemStackPopBeginSpecial().
8165 */
8166IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8167{
8168 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8169 if (rcStrict == VINF_SUCCESS)
8170 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8171 return rcStrict;
8172}
8173
8174
8175/**
8176 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8177 * iemMemStackPopContinueSpecial).
8178 *
8179 * The caller will manually commit the rSP.
8180 *
8181 * @returns Strict VBox status code.
8182 * @param pIemCpu The IEM per CPU data.
8183 * @param pvMem The pointer returned by
8184 * iemMemStackPopBeginSpecial() or
8185 * iemMemStackPopContinueSpecial().
8186 */
8187IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8188{
8189 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8190}
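
/*
 * Illustrative only: a minimal, hypothetical far-return style use of the
 * special stack pop helpers above. This is not the actual retf/iret code; the
 * two-word frame layout and the uNewIp/uNewCs locals are made up for the example.
 *
 *     uint64_t        uNewRsp;
 *     uint16_t const *pu16Frame;
 *     VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 2 * sizeof(uint16_t),
 *                                                           (void const **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint16_t const uNewIp = pu16Frame[0];
 *     uint16_t const uNewCs = pu16Frame[1];
 *     rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     (validate uNewCs and load CS:IP here)
 */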
8191
8192
8193/**
8194 * Fetches a system table byte.
8195 *
8196 * @returns Strict VBox status code.
8197 * @param pIemCpu The IEM per CPU data.
8198 * @param pbDst Where to return the byte.
8199 * @param iSegReg The index of the segment register to use for
8200 * this access. The base and limits are checked.
8201 * @param GCPtrMem The address of the guest memory.
8202 */
8203IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8204{
8205 /* The lazy approach for now... */
8206 uint8_t const *pbSrc;
8207 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8208 if (rc == VINF_SUCCESS)
8209 {
8210 *pbDst = *pbSrc;
8211 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8212 }
8213 return rc;
8214}
8215
8216
8217/**
8218 * Fetches a system table word.
8219 *
8220 * @returns Strict VBox status code.
8221 * @param pIemCpu The IEM per CPU data.
8222 * @param pu16Dst Where to return the word.
8223 * @param iSegReg The index of the segment register to use for
8224 * this access. The base and limits are checked.
8225 * @param GCPtrMem The address of the guest memory.
8226 */
8227IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8228{
8229 /* The lazy approach for now... */
8230 uint16_t const *pu16Src;
8231 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8232 if (rc == VINF_SUCCESS)
8233 {
8234 *pu16Dst = *pu16Src;
8235 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8236 }
8237 return rc;
8238}
8239
8240
8241/**
8242 * Fetches a system table dword.
8243 *
8244 * @returns Strict VBox status code.
8245 * @param pIemCpu The IEM per CPU data.
8246 * @param pu32Dst Where to return the dword.
8247 * @param iSegReg The index of the segment register to use for
8248 * this access. The base and limits are checked.
8249 * @param GCPtrMem The address of the guest memory.
8250 */
8251IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8252{
8253 /* The lazy approach for now... */
8254 uint32_t const *pu32Src;
8255 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8256 if (rc == VINF_SUCCESS)
8257 {
8258 *pu32Dst = *pu32Src;
8259 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8260 }
8261 return rc;
8262}
8263
8264
8265/**
8266 * Fetches a system table qword.
8267 *
8268 * @returns Strict VBox status code.
8269 * @param pIemCpu The IEM per CPU data.
8270 * @param pu64Dst Where to return the qword.
8271 * @param iSegReg The index of the segment register to use for
8272 * this access. The base and limits are checked.
8273 * @param GCPtrMem The address of the guest memory.
8274 */
8275IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8276{
8277 /* The lazy approach for now... */
8278 uint64_t const *pu64Src;
8279 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8280 if (rc == VINF_SUCCESS)
8281 {
8282 *pu64Dst = *pu64Src;
8283 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8284 }
8285 return rc;
8286}
8287
8288
8289/**
8290 * Fetches a descriptor table entry with caller specified error code.
8291 *
8292 * @returns Strict VBox status code.
8293 * @param pIemCpu The IEM per CPU.
8294 * @param pDesc Where to return the descriptor table entry.
8295 * @param uSel The selector which table entry to fetch.
8296 * @param uXcpt The exception to raise on table lookup error.
8297 * @param uErrorCode The error code associated with the exception.
8298 */
8299IEM_STATIC VBOXSTRICTRC
8300iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8301{
8302 AssertPtr(pDesc);
8303 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8304
8305 /** @todo did the 286 require all 8 bytes to be accessible? */
8306 /*
8307 * Get the selector table base and check bounds.
8308 */
8309 RTGCPTR GCPtrBase;
8310 if (uSel & X86_SEL_LDT)
8311 {
8312 if ( !pCtx->ldtr.Attr.n.u1Present
8313 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8314 {
8315 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8316 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8317 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8318 uErrorCode, 0);
8319 }
8320
8321 Assert(pCtx->ldtr.Attr.n.u1Present);
8322 GCPtrBase = pCtx->ldtr.u64Base;
8323 }
8324 else
8325 {
8326 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8327 {
8328 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8329 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8330 uErrorCode, 0);
8331 }
8332 GCPtrBase = pCtx->gdtr.pGdt;
8333 }
8334
8335 /*
8336 * Read the legacy descriptor and maybe the long mode extensions if
8337 * required.
8338 */
8339 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8340 if (rcStrict == VINF_SUCCESS)
8341 {
8342 if ( !IEM_IS_LONG_MODE(pIemCpu)
8343 || pDesc->Legacy.Gen.u1DescType)
8344 pDesc->Long.au64[1] = 0;
8345 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8346 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8347 else
8348 {
8349 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8350 /** @todo is this the right exception? */
8351 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8352 }
8353 }
8354 return rcStrict;
8355}
8356
8357
8358/**
8359 * Fetches a descriptor table entry.
8360 *
8361 * @returns Strict VBox status code.
8362 * @param pIemCpu The IEM per CPU.
8363 * @param pDesc Where to return the descriptor table entry.
8364 * @param uSel The selector which table entry to fetch.
8365 * @param uXcpt The exception to raise on table lookup error.
8366 */
8367IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8368{
8369 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8370}
8371
8372
8373/**
8374 * Fakes a long mode stack selector for SS = 0.
8375 *
8376 * @param pDescSs Where to return the fake stack descriptor.
8377 * @param uDpl The DPL we want.
8378 */
8379IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8380{
8381 pDescSs->Long.au64[0] = 0;
8382 pDescSs->Long.au64[1] = 0;
8383 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8384 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8385 pDescSs->Long.Gen.u2Dpl = uDpl;
8386 pDescSs->Long.Gen.u1Present = 1;
8387 pDescSs->Long.Gen.u1Long = 1;
8388}
8389
8390
8391/**
8392 * Marks the selector descriptor as accessed (only non-system descriptors).
8393 *
8394 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8395 * will therefore skip the limit checks.
8396 *
8397 * @returns Strict VBox status code.
8398 * @param pIemCpu The IEM per CPU.
8399 * @param uSel The selector.
8400 */
8401IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8402{
8403 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8404
8405 /*
8406 * Get the selector table base and calculate the entry address.
8407 */
8408 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8409 ? pCtx->ldtr.u64Base
8410 : pCtx->gdtr.pGdt;
8411 GCPtr += uSel & X86_SEL_MASK;
8412
8413 /*
8414 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8415 * ugly stuff to avoid this. This will make sure it's an atomic access
8416 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8417 */
8418 VBOXSTRICTRC rcStrict;
8419 uint32_t volatile *pu32;
8420 if ((GCPtr & 3) == 0)
8421 {
8422 /* The normal case: map the 32 bits around the accessed bit (bit 40). */
8423 GCPtr += 2 + 2;
8424 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8425 if (rcStrict != VINF_SUCCESS)
8426 return rcStrict;
8427 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8428 }
8429 else
8430 {
8431 /* The misaligned GDT/LDT case, map the whole thing. */
8432 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8433 if (rcStrict != VINF_SUCCESS)
8434 return rcStrict;
8435 switch ((uintptr_t)pu32 & 3)
8436 {
8437 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8438 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8439 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8440 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8441 }
8442 }
8443
8444 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8445}
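
/*
 * Illustrative only: a heavily simplified, hypothetical selector load sequence
 * combining the descriptor fetch and accessed-bit helpers above. The real
 * segment loading code performs many more checks; uSel is a made-up local and
 * iemRaiseSelectorNotPresentBySelector is assumed to be the usual \#NP helper.
 *
 *     IEMSELDESC   Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     if (!Desc.Legacy.Gen.u1Present)
 *         return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
 *     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *     {
 *         rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *         Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 *     }
 */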
8446
8447/** @} */
8448
8449
8450/*
8451 * Include the C/C++ implementation of instruction.
8452 */
8453#include "IEMAllCImpl.cpp.h"
8454
8455
8456
8457/** @name "Microcode" macros.
8458 *
8459 * The idea is that we should be able to use the same code to interpret
8460 * instructions as well as recompiler instructions. Thus this obfuscation.
8461 *
8462 * @{
8463 */
8464#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8465#define IEM_MC_END() }
8466#define IEM_MC_PAUSE() do {} while (0)
8467#define IEM_MC_CONTINUE() do {} while (0)
8468
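/*
 * Illustrative only: a hypothetical register-to-register body built from these
 * microcode macros, loosely modelled on the decoder templates but not copied
 * from them (iemAImpl_add_u16 is the 16-bit ADD assembly worker).
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_ARG(uint16_t *, pu16Dst,  0);
 *     IEM_MC_ARG(uint16_t,   u16Src,   1);
 *     IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *     IEM_MC_FETCH_GREG_U16(u16Src, X86_GREG_xCX);
 *     IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
 *     IEM_MC_REF_EFLAGS(pEFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */
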
8469/** Internal macro. */
8470#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8471 do \
8472 { \
8473 VBOXSTRICTRC rcStrict2 = a_Expr; \
8474 if (rcStrict2 != VINF_SUCCESS) \
8475 return rcStrict2; \
8476 } while (0)
8477
8478#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8479#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8480#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8481#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8482#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8483#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8484#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8485
8486#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8487#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8488 do { \
8489 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8490 return iemRaiseDeviceNotAvailable(pIemCpu); \
8491 } while (0)
8492#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8493 do { \
8494 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8495 return iemRaiseMathFault(pIemCpu); \
8496 } while (0)
8497#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8498 do { \
8499 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8500 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8501 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8502 return iemRaiseUndefinedOpcode(pIemCpu); \
8503 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8504 return iemRaiseDeviceNotAvailable(pIemCpu); \
8505 } while (0)
8506#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8507 do { \
8508 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8509 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8510 return iemRaiseUndefinedOpcode(pIemCpu); \
8511 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8512 return iemRaiseDeviceNotAvailable(pIemCpu); \
8513 } while (0)
8514#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8515 do { \
8516 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8517 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8518 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8519 return iemRaiseUndefinedOpcode(pIemCpu); \
8520 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8521 return iemRaiseDeviceNotAvailable(pIemCpu); \
8522 } while (0)
8523#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8524 do { \
8525 if (pIemCpu->uCpl != 0) \
8526 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8527 } while (0)
8528
8529
8530#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8531#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8532#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8533#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8534#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8535#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8536#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8537 uint32_t a_Name; \
8538 uint32_t *a_pName = &a_Name
8539#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8540 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8541
8542#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8543#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8544
8545#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8546#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8547#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8548#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8549#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8550#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8551#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8552#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8553#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8554#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8555#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8556#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8557#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8558#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8559#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8560#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8561#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8562#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8563#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8564#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8565#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8566#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8567#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8568#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8569#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8570#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8571#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8572#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8573#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8574/** @note Not for IOPL or IF testing or modification. */
8575#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8576#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8577#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8578#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8579
8580#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8581#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8582#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8583#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8584#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8585#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8586#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8587#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8588#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8589#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8590#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8591 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8592
8593#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8594#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8595/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8596 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8597#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8598#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8599/** @note Not for IOPL or IF testing or modification. */
8600#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8601
8602#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8603#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8604#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8605 do { \
8606 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8607 *pu32Reg += (a_u32Value); \
8608 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8609 } while (0)
8610#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8611
8612#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8613#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8614#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8615 do { \
8616 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8617 *pu32Reg -= (a_u32Value); \
8618 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8619 } while (0)
8620#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8621#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
8622
8623#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8624#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8625#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8626#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8627#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8628#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8629#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8630
8631#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8632#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8633#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8634#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8635
8636#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8637#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8638#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8639
8640#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8641#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
8642#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8643
8644#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8645#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8646#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8647
8648#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8649#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8650#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8651
8652#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8653
8654#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8655
8656#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8657#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8658#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8659 do { \
8660 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8661 *pu32Reg &= (a_u32Value); \
8662 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8663 } while (0)
8664#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8665
8666#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8667#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8668#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8669 do { \
8670 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8671 *pu32Reg |= (a_u32Value); \
8672 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8673 } while (0)
8674#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8675
8676
8677/** @note Not for IOPL or IF modification. */
8678#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8679/** @note Not for IOPL or IF modification. */
8680#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8681/** @note Not for IOPL or IF modification. */
8682#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8683
8684#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8685
8686
8687#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8688 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8689#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8690 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8691#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8692 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8693#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8694 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8695#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8696 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8697#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8698 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8699#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8700 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8701
8702#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8703 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8704#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8705 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8706#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8707 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8708#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8709 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8710#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8711 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8712 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8713 } while (0)
8714#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8715 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8716 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8717 } while (0)
8718#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8719 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8720#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8721 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8722#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8723 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8724
8725#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8727#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8729#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8730 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8731
8732#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8734#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8736#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8738
8739#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8741#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8742 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8743#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8744 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8745
8746#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8748
8749#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8751#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8753#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8755#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8757
8758#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8760#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8761 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8762#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8764
8765#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8767#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8769
8770
8771
8772#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8773 do { \
8774 uint8_t u8Tmp; \
8775 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8776 (a_u16Dst) = u8Tmp; \
8777 } while (0)
8778#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8779 do { \
8780 uint8_t u8Tmp; \
8781 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8782 (a_u32Dst) = u8Tmp; \
8783 } while (0)
8784#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8785 do { \
8786 uint8_t u8Tmp; \
8787 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8788 (a_u64Dst) = u8Tmp; \
8789 } while (0)
8790#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8791 do { \
8792 uint16_t u16Tmp; \
8793 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8794 (a_u32Dst) = u16Tmp; \
8795 } while (0)
8796#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8797 do { \
8798 uint16_t u16Tmp; \
8799 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8800 (a_u64Dst) = u16Tmp; \
8801 } while (0)
8802#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8803 do { \
8804 uint32_t u32Tmp; \
8805 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8806 (a_u64Dst) = u32Tmp; \
8807 } while (0)
8808
8809#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8810 do { \
8811 uint8_t u8Tmp; \
8812 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8813 (a_u16Dst) = (int8_t)u8Tmp; \
8814 } while (0)
8815#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8816 do { \
8817 uint8_t u8Tmp; \
8818 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8819 (a_u32Dst) = (int8_t)u8Tmp; \
8820 } while (0)
8821#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8822 do { \
8823 uint8_t u8Tmp; \
8824 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8825 (a_u64Dst) = (int8_t)u8Tmp; \
8826 } while (0)
8827#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8828 do { \
8829 uint16_t u16Tmp; \
8830 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8831 (a_u32Dst) = (int16_t)u16Tmp; \
8832 } while (0)
8833#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8834 do { \
8835 uint16_t u16Tmp; \
8836 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8837 (a_u64Dst) = (int16_t)u16Tmp; \
8838 } while (0)
8839#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8840 do { \
8841 uint32_t u32Tmp; \
8842 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8843 (a_u64Dst) = (int32_t)u32Tmp; \
8844 } while (0)
8845
8846#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8847 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8848#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8849 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8850#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8851 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8852#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8853 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8854
8855#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8856 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8857#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8858 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8859#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8860 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8861#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8862 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8863
8864#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8865#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8866#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8867#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8868#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8869#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8870#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8871 do { \
8872 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8873 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8874 } while (0)
8875
8876#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8877 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8878#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8879 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8880
8881
8882#define IEM_MC_PUSH_U16(a_u16Value) \
8883 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8884#define IEM_MC_PUSH_U32(a_u32Value) \
8885 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8886#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8887 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8888#define IEM_MC_PUSH_U64(a_u64Value) \
8889 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8890
8891#define IEM_MC_POP_U16(a_pu16Value) \
8892 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8893#define IEM_MC_POP_U32(a_pu32Value) \
8894 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8895#define IEM_MC_POP_U64(a_pu64Value) \
8896 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8897
8898/** Maps guest memory for direct or bounce buffered access.
8899 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8900 * @remarks May return.
8901 */
8902#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8903 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8904
8905/** Maps guest memory for direct or bounce buffered access.
8906 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8907 * @remarks May return.
8908 */
8909#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8910 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8911
8912/** Commits the memory and unmaps the guest memory.
8913 * @remarks May return.
8914 */
8915#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8916 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8917
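/*
 * Illustrative only: a hypothetical read-modify-write memory operand sequence
 * using the map/commit pair above, loosely following the usual decoder pattern
 * but not copied from it. bRm is assumed to be the ModR/M byte fetched earlier
 * by the decoder, and iemAImpl_add_u16 the 16-bit ADD assembly worker.
 *
 *     IEM_MC_BEGIN(3, 2);
 *     IEM_MC_ARG(uint16_t *,   pu16Dst,         0);
 *     IEM_MC_ARG(uint16_t,     u16Src,          1);
 *     IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
 *     IEM_MC_LOCAL(RTGCPTR,    GCPtrEffDst);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEM_MC_FETCH_GREG_U16(u16Src, X86_GREG_xAX);
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_FETCH_EFLAGS(EFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *     IEM_MC_COMMIT_EFLAGS(EFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */
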
8918/** Commits the memory and unmaps the guest memory unless the FPU status word
8919 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8920 * would cause FLD not to store.
8921 *
8922 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8923 * store, while \#P will not.
8924 *
8925 * @remarks May in theory return - for now.
8926 */
8927#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8928 do { \
8929 if ( !(a_u16FSW & X86_FSW_ES) \
8930 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8931 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8932 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8933 } while (0)
8934
8935/** Calculate effective address from R/M. */
8936#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8937 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8938
8939#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8940#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8941#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8942#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8943#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8944#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8945#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8946
8947/**
8948 * Defers the rest of the instruction emulation to a C implementation routine
8949 * and returns, only taking the standard parameters.
8950 *
8951 * @param a_pfnCImpl The pointer to the C routine.
8952 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8953 */
8954#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8955
8956/**
8957 * Defers the rest of instruction emulation to a C implementation routine and
8958 * returns, taking one argument in addition to the standard ones.
8959 *
8960 * @param a_pfnCImpl The pointer to the C routine.
8961 * @param a0 The argument.
8962 */
8963#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8964
8965/**
8966 * Defers the rest of the instruction emulation to a C implementation routine
8967 * and returns, taking two arguments in addition to the standard ones.
8968 *
8969 * @param a_pfnCImpl The pointer to the C routine.
8970 * @param a0 The first extra argument.
8971 * @param a1 The second extra argument.
8972 */
8973#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8974
8975/**
8976 * Defers the rest of the instruction emulation to a C implementation routine
8977 * and returns, taking three arguments in addition to the standard ones.
8978 *
8979 * @param a_pfnCImpl The pointer to the C routine.
8980 * @param a0 The first extra argument.
8981 * @param a1 The second extra argument.
8982 * @param a2 The third extra argument.
8983 */
8984#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8985
8986/**
8987 * Defers the rest of the instruction emulation to a C implementation routine
8988 * and returns, taking four arguments in addition to the standard ones.
8989 *
8990 * @param a_pfnCImpl The pointer to the C routine.
8991 * @param a0 The first extra argument.
8992 * @param a1 The second extra argument.
8993 * @param a2 The third extra argument.
8994 * @param a3 The fourth extra argument.
8995 */
8996#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8997
8998/**
8999 * Defers the rest of the instruction emulation to a C implementation routine
9000 * and returns, taking five arguments in addition to the standard ones.
9001 *
9002 * @param a_pfnCImpl The pointer to the C routine.
9003 * @param a0 The first extra argument.
9004 * @param a1 The second extra argument.
9005 * @param a2 The third extra argument.
9006 * @param a3 The fourth extra argument.
9007 * @param a4 The fifth extra argument.
9008 */
9009#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
9010
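/*
 * Illustrative only: a hypothetical use of IEM_MC_CALL_CIMPL_2 from inside an
 * MC block, loosely modelled on a segment register load. iSegReg is assumed to
 * be a decoder local and iemCImpl_load_SReg the assumed C worker.
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
 *     IEM_MC_ARG(uint16_t,      u16Value,          1);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
 *     IEM_MC_END();
 */
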
9011/**
9012 * Defers the entire instruction emulation to a C implementation routine and
9013 * returns, only taking the standard parameters.
9014 *
9015 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9016 *
9017 * @param a_pfnCImpl The pointer to the C routine.
9018 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
9019 */
9020#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
9021
9022/**
9023 * Defers the entire instruction emulation to a C implementation routine and
9024 * returns, taking one argument in addition to the standard ones.
9025 *
9026 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9027 *
9028 * @param a_pfnCImpl The pointer to the C routine.
9029 * @param a0 The argument.
9030 */
9031#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
9032
9033/**
9034 * Defers the entire instruction emulation to a C implementation routine and
9035 * returns, taking two arguments in addition to the standard ones.
9036 *
9037 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9038 *
9039 * @param a_pfnCImpl The pointer to the C routine.
9040 * @param a0 The first extra argument.
9041 * @param a1 The second extra argument.
9042 */
9043#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
9044
9045/**
9046 * Defers the entire instruction emulation to a C implementation routine and
9047 * returns, taking three arguments in addition to the standard ones.
9048 *
9049 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9050 *
9051 * @param a_pfnCImpl The pointer to the C routine.
9052 * @param a0 The first extra argument.
9053 * @param a1 The second extra argument.
9054 * @param a2 The third extra argument.
9055 */
9056#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9057
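/*
 * Illustrative only: opcode handlers that hand the whole instruction over to
 * the C implementation typically reduce to a single statement, e.g.
 * (hypothetical; iemCImpl_hlt is the assumed C worker for HLT):
 *
 *     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 */
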
9058/**
9059 * Calls a FPU assembly implementation taking one visible argument.
9060 *
9061 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9062 * @param a0 The first extra argument.
9063 */
9064#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
9065 do { \
9066 iemFpuPrepareUsage(pIemCpu); \
9067 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9068 } while (0)
9069
9070/**
9071 * Calls a FPU assembly implementation taking two visible arguments.
9072 *
9073 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9074 * @param a0 The first extra argument.
9075 * @param a1 The second extra argument.
9076 */
9077#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9078 do { \
9079 iemFpuPrepareUsage(pIemCpu); \
9080 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9081 } while (0)
9082
9083/**
9084 * Calls a FPU assembly implementation taking three visible arguments.
9085 *
9086 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9087 * @param a0 The first extra argument.
9088 * @param a1 The second extra argument.
9089 * @param a2 The third extra argument.
9090 */
9091#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9092 do { \
9093 iemFpuPrepareUsage(pIemCpu); \
9094 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9095 } while (0)
9096
9097#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9098 do { \
9099 (a_FpuData).FSW = (a_FSW); \
9100 (a_FpuData).r80Result = *(a_pr80Value); \
9101 } while (0)
9102
9103/** Pushes FPU result onto the stack. */
9104#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9105 iemFpuPushResult(pIemCpu, &a_FpuData)
9106/** Pushes FPU result onto the stack and sets the FPUDP. */
9107#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9108 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9109
9110/** Replaces ST0 with value 1 and pushes value 2 onto the FPU stack. */
9111#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9112 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9113
9114/** Stores FPU result in a stack register. */
9115#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9116 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9117/** Stores FPU result in a stack register and pops the stack. */
9118#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9119 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9120/** Stores FPU result in a stack register and sets the FPUDP. */
9121#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9122 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9123/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9124 * stack. */
9125#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9126 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9127
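/*
 * Illustrative only: a simplified, hypothetical "fld m32" style flow combining
 * the FPU call and result macros above. The real template also checks for a
 * full FPU stack; bRm is assumed to be the ModR/M byte from the decoder and
 * iemAImpl_fld_r32_to_r80 the assumed assembly worker.
 *
 *     IEM_MC_BEGIN(2, 3);
 *     IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
 *     IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *     IEM_MC_LOCAL(RTFLOAT32U,            r32Val);
 *     IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *     IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val, r32Val, 1);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *     IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *     IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *     IEM_MC_FETCH_MEM_R32(r32Val, pIemCpu->iEffSeg, GCPtrEffSrc);
 *     IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r32_to_r80, pFpuRes, pr32Val);
 *     IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
 *     IEM_MC_USED_FPU();
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */
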
9128/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9129#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9130 iemFpuUpdateOpcodeAndIp(pIemCpu)
9131/** Free a stack register (for FFREE and FFREEP). */
9132#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9133 iemFpuStackFree(pIemCpu, a_iStReg)
9134/** Increment the FPU stack pointer. */
9135#define IEM_MC_FPU_STACK_INC_TOP() \
9136 iemFpuStackIncTop(pIemCpu)
9137/** Decrement the FPU stack pointer. */
9138#define IEM_MC_FPU_STACK_DEC_TOP() \
9139 iemFpuStackDecTop(pIemCpu)
9140
9141/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9142#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9143 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9144/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9145#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9146 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9147/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9148#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9149 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9150/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9151#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9152 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9153/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9154 * stack. */
9155#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9156 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9157/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9158#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9159 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9160
9161/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9162#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9163 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9164/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9165 * stack. */
9166#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9167 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9168/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9169 * FPUDS. */
9170#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9171 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9172/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9173 * FPUDS. Pops stack. */
9174#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9175 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9176/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9177 * stack twice. */
9178#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9179 iemFpuStackUnderflowThenPopPop(pIemCpu)
9180/** Raises a FPU stack underflow exception for an instruction pushing a result
9181 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9182#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9183 iemFpuStackPushUnderflow(pIemCpu)
9184/** Raises a FPU stack underflow exception for an instruction pushing a result
9185 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9186#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9187 iemFpuStackPushUnderflowTwo(pIemCpu)
9188
9189/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9190 * FPUIP, FPUCS and FOP. */
9191#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9192 iemFpuStackPushOverflow(pIemCpu)
9193/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9194 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9195#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9196 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9197/** Indicates that we (might) have modified the FPU state. */
9198#define IEM_MC_USED_FPU() \
9199 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9200
9201/**
9202 * Calls a MMX assembly implementation taking two visible arguments.
9203 *
9204 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9205 * @param a0 The first extra argument.
9206 * @param a1 The second extra argument.
9207 */
9208#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9209 do { \
9210 iemFpuPrepareUsage(pIemCpu); \
9211 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9212 } while (0)
9213
9214/**
9215 * Calls a MMX assembly implementation taking three visible arguments.
9216 *
9217 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9218 * @param a0 The first extra argument.
9219 * @param a1 The second extra argument.
9220 * @param a2 The third extra argument.
9221 */
9222#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9223 do { \
9224 iemFpuPrepareUsage(pIemCpu); \
9225 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9226 } while (0)
9227
9228
9229/**
9230 * Calls a SSE assembly implementation taking two visible arguments.
9231 *
9232 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9233 * @param a0 The first extra argument.
9234 * @param a1 The second extra argument.
9235 */
9236#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9237 do { \
9238 iemFpuPrepareUsageSse(pIemCpu); \
9239 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9240 } while (0)
9241
9242/**
9243 * Calls a SSE assembly implementation taking three visible arguments.
9244 *
9245 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9246 * @param a0 The first extra argument.
9247 * @param a1 The second extra argument.
9248 * @param a2 The third extra argument.
9249 */
9250#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9251 do { \
9252 iemFpuPrepareUsageSse(pIemCpu); \
9253 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9254 } while (0)
9255
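/* Purely illustrative sketch, excluded from the build: the register,register
 * form of a two-operand MMX instruction dispatching through
 * IEM_MC_CALL_MMX_AIMPL_2 above.  The worker name iemAImpl_pxor_u64 and the
 * assumption that bRm already holds the fetched ModR/M byte are for
 * illustration only. */
#if 0
    IEM_MC_BEGIN(2, 0);
    IEM_MC_ARG(uint64_t *,          pDst, 0);
    IEM_MC_ARG(uint64_t const *,    pSrc, 1);
    IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
    IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
    IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_pxor_u64, pDst, pSrc); /* prepares FPU usage, then calls the worker */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif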
9256
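/* Note: each IEM_MC_IF_XXX below opens a plain C 'if' together with its
 * opening brace; the block must be closed with IEM_MC_ELSE() and/or
 * IEM_MC_ENDIF(), which supply the matching braces. */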
9257/** @note Not for IOPL or IF testing. */
9258#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9259/** @note Not for IOPL or IF testing. */
9260#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9261/** @note Not for IOPL or IF testing. */
9262#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9263/** @note Not for IOPL or IF testing. */
9264#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9265/** @note Not for IOPL or IF testing. */
9266#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9267 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9268 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9269/** @note Not for IOPL or IF testing. */
9270#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9271 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9272 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9273/** @note Not for IOPL or IF testing. */
9274#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9275 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9276 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9277 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9278/** @note Not for IOPL or IF testing. */
9279#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9280 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9281 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9282 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9283#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9284#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9285#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9286/** @note Not for IOPL or IF testing. */
9287#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9288 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9289 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9290/** @note Not for IOPL or IF testing. */
9291#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9292 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9293 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9294/** @note Not for IOPL or IF testing. */
9295#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9296 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9297 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9298/** @note Not for IOPL or IF testing. */
9299#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9300 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9301 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9302/** @note Not for IOPL or IF testing. */
9303#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9304 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9305 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9306/** @note Not for IOPL or IF testing. */
9307#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9308 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9309 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9310#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9311#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9312#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9313 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9314#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9315 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9316#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9317 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9318#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9319 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9320#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9321 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9322#define IEM_MC_IF_FCW_IM() \
9323 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9324
9325#define IEM_MC_ELSE() } else {
9326#define IEM_MC_ENDIF() } do {} while (0)
9327
9328/** @} */
9329
9330
9331/** @name Opcode Debug Helpers.
9332 * @{
9333 */
9334#ifdef DEBUG
9335# define IEMOP_MNEMONIC(a_szMnemonic) \
9336 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9337 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9338# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9339 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9340 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9341#else
9342# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9343# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9344#endif
9345
9346/** @} */
9347
9348
9349/** @name Opcode Helpers.
9350 * @{
9351 */
9352
9353#ifdef IN_RING3
9354# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9355 do { \
9356 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9357 else \
9358 { \
9359 DBGFSTOP(IEMCPU_TO_VM(pIemCpu)); \
9360 return IEMOP_RAISE_INVALID_OPCODE(); \
9361 } \
9362 } while (0)
9363#else
9364# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9365 do { \
9366 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9367 else return IEMOP_RAISE_INVALID_OPCODE(); \
9368 } while (0)
9369#endif
9370
9371/** The instruction requires a 186 or later. */
9372#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
9373# define IEMOP_HLP_MIN_186() do { } while (0)
9374#else
9375# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
9376#endif
9377
9378/** The instruction requires a 286 or later. */
9379#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
9380# define IEMOP_HLP_MIN_286() do { } while (0)
9381#else
9382# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
9383#endif
9384
9385/** The instruction requires a 386 or later. */
9386#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9387# define IEMOP_HLP_MIN_386() do { } while (0)
9388#else
9389# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
9390#endif
9391
9392/** The instruction requires a 386 or later if the given expression is true. */
9393#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9394# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
9395#else
9396# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
9397#endif
9398
9399/** The instruction requires a 486 or later. */
9400#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
9401# define IEMOP_HLP_MIN_486() do { } while (0)
9402#else
9403# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
9404#endif
9405
9406/** The instruction requires a Pentium (586) or later. */
9407#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
9408# define IEMOP_HLP_MIN_586() do { } while (0)
9409#else
9410# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
9411#endif
9412
9413/** The instruction requires a PentiumPro (686) or later. */
9414#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
9415# define IEMOP_HLP_MIN_686() do { } while (0)
9416#else
9417# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
9418#endif
9419
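/* Purely illustrative sketch, excluded from the build: how an opcode routine
 * typically guards itself with one of the MIN helpers above before doing any
 * real decoding work.  The opcode picked (pusha) and the exact body are
 * assumptions for illustration only. */
#if 0
FNIEMOP_DEF(iemOp_example_pusha)
{
    IEMOP_MNEMONIC("pusha");
    IEMOP_HLP_MIN_186();    /* 8086/8088 targets get an invalid opcode (#UD) here */
    IEMOP_HLP_NO_64BIT();   /* the encoding is not available in 64-bit mode */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha);
}
#endif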
9420
9421/** The instruction raises an \#UD in real and V8086 mode. */
9422#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9423 do \
9424 { \
9425 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9426 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9427 } while (0)
9428
9429/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9430 * lock prefixed.
9431 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9432#define IEMOP_HLP_NO_LOCK_PREFIX() \
9433 do \
9434 { \
9435 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9436 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9437 } while (0)
9438
9439/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9440 * 64-bit mode. */
9441#define IEMOP_HLP_NO_64BIT() \
9442 do \
9443 { \
9444 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9445 return IEMOP_RAISE_INVALID_OPCODE(); \
9446 } while (0)
9447
9448/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9449 * 64-bit mode. */
9450#define IEMOP_HLP_ONLY_64BIT() \
9451 do \
9452 { \
9453 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9454 return IEMOP_RAISE_INVALID_OPCODE(); \
9455 } while (0)
9456
9457/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9458#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9459 do \
9460 { \
9461 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9462 iemRecalEffOpSize64Default(pIemCpu); \
9463 } while (0)
9464
9465/** The instruction has 64-bit operand size if 64-bit mode. */
9466#define IEMOP_HLP_64BIT_OP_SIZE() \
9467 do \
9468 { \
9469 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9470 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9471 } while (0)
9472
9473/** Only a REX prefix immediately preceding the first opcode byte takes
9474 * effect. This macro helps ensure this as well as log bad guest code. */
9475#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9476 do \
9477 { \
9478 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9479 { \
9480 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9481 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9482 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9483 pIemCpu->uRexB = 0; \
9484 pIemCpu->uRexIndex = 0; \
9485 pIemCpu->uRexReg = 0; \
9486 iemRecalEffOpSize(pIemCpu); \
9487 } \
9488 } while (0)
9489
9490/**
9491 * Done decoding.
9492 */
9493#define IEMOP_HLP_DONE_DECODING() \
9494 do \
9495 { \
9496 /*nothing for now, maybe later... */ \
9497 } while (0)
9498
9499/**
9500 * Done decoding, raise \#UD exception if lock prefix present.
9501 */
9502#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9503 do \
9504 { \
9505 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9506 { /* likely */ } \
9507 else \
9508 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9509 } while (0)
9510#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9511 do \
9512 { \
9513 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9514 { /* likely */ } \
9515 else \
9516 { \
9517 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9518 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9519 } \
9520 } while (0)
9521#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9522 do \
9523 { \
9524 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9525 { /* likely */ } \
9526 else \
9527 { \
9528 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9529 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9530 } \
9531 } while (0)
9532/**
9533 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9534 * are present.
9535 */
9536#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9537 do \
9538 { \
9539 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9540 { /* likely */ } \
9541 else \
9542 return IEMOP_RAISE_INVALID_OPCODE(); \
9543 } while (0)
9544
9545
9546/**
9547 * Calculates the effective address of a ModR/M memory operand.
9548 *
9549 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9550 *
9551 * @return Strict VBox status code.
9552 * @param pIemCpu The IEM per CPU data.
9553 * @param bRm The ModRM byte.
9554 * @param cbImm The size of any immediate following the
9555 * effective address opcode bytes. Important for
9556 * RIP relative addressing.
9557 * @param pGCPtrEff Where to return the effective address.
9558 */
9559IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9560{
9561 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9562 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9563#define SET_SS_DEF() \
9564 do \
9565 { \
9566 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9567 pIemCpu->iEffSeg = X86_SREG_SS; \
9568 } while (0)
9569
9570 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9571 {
9572/** @todo Check the effective address size crap! */
9573 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9574 {
9575 uint16_t u16EffAddr;
9576
9577 /* Handle the disp16 form with no registers first. */
9578 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9579 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9580 else
9581 {
9582                /* Get the displacement. */
9583 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9584 {
9585 case 0: u16EffAddr = 0; break;
9586 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9587 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9588 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9589 }
9590
9591 /* Add the base and index registers to the disp. */
9592 switch (bRm & X86_MODRM_RM_MASK)
9593 {
9594 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9595 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9596 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9597 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9598 case 4: u16EffAddr += pCtx->si; break;
9599 case 5: u16EffAddr += pCtx->di; break;
9600 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9601 case 7: u16EffAddr += pCtx->bx; break;
9602 }
9603 }
9604
9605 *pGCPtrEff = u16EffAddr;
9606 }
9607 else
9608 {
9609 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9610 uint32_t u32EffAddr;
9611
9612 /* Handle the disp32 form with no registers first. */
9613 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9614 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9615 else
9616 {
9617 /* Get the register (or SIB) value. */
9618 switch ((bRm & X86_MODRM_RM_MASK))
9619 {
9620 case 0: u32EffAddr = pCtx->eax; break;
9621 case 1: u32EffAddr = pCtx->ecx; break;
9622 case 2: u32EffAddr = pCtx->edx; break;
9623 case 3: u32EffAddr = pCtx->ebx; break;
9624 case 4: /* SIB */
9625 {
9626 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9627
9628 /* Get the index and scale it. */
9629 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9630 {
9631 case 0: u32EffAddr = pCtx->eax; break;
9632 case 1: u32EffAddr = pCtx->ecx; break;
9633 case 2: u32EffAddr = pCtx->edx; break;
9634 case 3: u32EffAddr = pCtx->ebx; break;
9635 case 4: u32EffAddr = 0; /*none */ break;
9636 case 5: u32EffAddr = pCtx->ebp; break;
9637 case 6: u32EffAddr = pCtx->esi; break;
9638 case 7: u32EffAddr = pCtx->edi; break;
9639 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9640 }
9641 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9642
9643 /* add base */
9644 switch (bSib & X86_SIB_BASE_MASK)
9645 {
9646 case 0: u32EffAddr += pCtx->eax; break;
9647 case 1: u32EffAddr += pCtx->ecx; break;
9648 case 2: u32EffAddr += pCtx->edx; break;
9649 case 3: u32EffAddr += pCtx->ebx; break;
9650 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9651 case 5:
9652 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9653 {
9654 u32EffAddr += pCtx->ebp;
9655 SET_SS_DEF();
9656 }
9657 else
9658 {
9659 uint32_t u32Disp;
9660 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9661 u32EffAddr += u32Disp;
9662 }
9663 break;
9664 case 6: u32EffAddr += pCtx->esi; break;
9665 case 7: u32EffAddr += pCtx->edi; break;
9666 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9667 }
9668 break;
9669 }
9670 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9671 case 6: u32EffAddr = pCtx->esi; break;
9672 case 7: u32EffAddr = pCtx->edi; break;
9673 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9674 }
9675
9676 /* Get and add the displacement. */
9677 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9678 {
9679 case 0:
9680 break;
9681 case 1:
9682 {
9683 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9684 u32EffAddr += i8Disp;
9685 break;
9686 }
9687 case 2:
9688 {
9689 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9690 u32EffAddr += u32Disp;
9691 break;
9692 }
9693 default:
9694 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9695 }
9696
9697 }
9698 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9699 *pGCPtrEff = u32EffAddr;
9700 else
9701 {
9702 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9703 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9704 }
9705 }
9706 }
9707 else
9708 {
9709 uint64_t u64EffAddr;
9710
9711 /* Handle the rip+disp32 form with no registers first. */
9712 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9713 {
9714 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9715 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9716 }
9717 else
9718 {
9719 /* Get the register (or SIB) value. */
9720 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9721 {
9722 case 0: u64EffAddr = pCtx->rax; break;
9723 case 1: u64EffAddr = pCtx->rcx; break;
9724 case 2: u64EffAddr = pCtx->rdx; break;
9725 case 3: u64EffAddr = pCtx->rbx; break;
9726 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9727 case 6: u64EffAddr = pCtx->rsi; break;
9728 case 7: u64EffAddr = pCtx->rdi; break;
9729 case 8: u64EffAddr = pCtx->r8; break;
9730 case 9: u64EffAddr = pCtx->r9; break;
9731 case 10: u64EffAddr = pCtx->r10; break;
9732 case 11: u64EffAddr = pCtx->r11; break;
9733 case 13: u64EffAddr = pCtx->r13; break;
9734 case 14: u64EffAddr = pCtx->r14; break;
9735 case 15: u64EffAddr = pCtx->r15; break;
9736 /* SIB */
9737 case 4:
9738 case 12:
9739 {
9740 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9741
9742 /* Get the index and scale it. */
9743 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9744 {
9745 case 0: u64EffAddr = pCtx->rax; break;
9746 case 1: u64EffAddr = pCtx->rcx; break;
9747 case 2: u64EffAddr = pCtx->rdx; break;
9748 case 3: u64EffAddr = pCtx->rbx; break;
9749 case 4: u64EffAddr = 0; /*none */ break;
9750 case 5: u64EffAddr = pCtx->rbp; break;
9751 case 6: u64EffAddr = pCtx->rsi; break;
9752 case 7: u64EffAddr = pCtx->rdi; break;
9753 case 8: u64EffAddr = pCtx->r8; break;
9754 case 9: u64EffAddr = pCtx->r9; break;
9755 case 10: u64EffAddr = pCtx->r10; break;
9756 case 11: u64EffAddr = pCtx->r11; break;
9757 case 12: u64EffAddr = pCtx->r12; break;
9758 case 13: u64EffAddr = pCtx->r13; break;
9759 case 14: u64EffAddr = pCtx->r14; break;
9760 case 15: u64EffAddr = pCtx->r15; break;
9761 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9762 }
9763 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9764
9765 /* add base */
9766 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9767 {
9768 case 0: u64EffAddr += pCtx->rax; break;
9769 case 1: u64EffAddr += pCtx->rcx; break;
9770 case 2: u64EffAddr += pCtx->rdx; break;
9771 case 3: u64EffAddr += pCtx->rbx; break;
9772 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9773 case 6: u64EffAddr += pCtx->rsi; break;
9774 case 7: u64EffAddr += pCtx->rdi; break;
9775 case 8: u64EffAddr += pCtx->r8; break;
9776 case 9: u64EffAddr += pCtx->r9; break;
9777 case 10: u64EffAddr += pCtx->r10; break;
9778 case 11: u64EffAddr += pCtx->r11; break;
9779 case 12: u64EffAddr += pCtx->r12; break;
9780 case 14: u64EffAddr += pCtx->r14; break;
9781 case 15: u64EffAddr += pCtx->r15; break;
9782 /* complicated encodings */
9783 case 5:
9784 case 13:
9785 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9786 {
9787 if (!pIemCpu->uRexB)
9788 {
9789 u64EffAddr += pCtx->rbp;
9790 SET_SS_DEF();
9791 }
9792 else
9793 u64EffAddr += pCtx->r13;
9794 }
9795 else
9796 {
9797 uint32_t u32Disp;
9798 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9799 u64EffAddr += (int32_t)u32Disp;
9800 }
9801 break;
9802 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9803 }
9804 break;
9805 }
9806 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9807 }
9808
9809 /* Get and add the displacement. */
9810 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9811 {
9812 case 0:
9813 break;
9814 case 1:
9815 {
9816 int8_t i8Disp;
9817 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9818 u64EffAddr += i8Disp;
9819 break;
9820 }
9821 case 2:
9822 {
9823 uint32_t u32Disp;
9824 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9825 u64EffAddr += (int32_t)u32Disp;
9826 break;
9827 }
9828 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9829 }
9830
9831 }
9832
9833 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9834 *pGCPtrEff = u64EffAddr;
9835 else
9836 {
9837 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9838 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9839 }
9840 }
9841
9842 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9843 return VINF_SUCCESS;
9844}
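
/* Worked example (illustrative): with 16-bit addressing and bRm = 0x42, i.e.
   mod=01, reg=000, r/m=010, the function above fetches one sign-extended
   disp8 byte, adds it to BP+SI, defaults the segment to SS via SET_SS_DEF(),
   and returns the 16-bit sum in *pGCPtrEff. */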
9845
9846/** @} */
9847
9848
9849
9850/*
9851 * Include the instructions
9852 */
9853#include "IEMAllInstructions.cpp.h"
9854
9855
9856
9857
9858#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9859
9860/**
9861 * Sets up execution verification mode.
9862 */
9863IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9864{
9865 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9866 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9867
9868 /*
9869 * Always note down the address of the current instruction.
9870 */
9871 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9872 pIemCpu->uOldRip = pOrgCtx->rip;
9873
9874 /*
9875 * Enable verification and/or logging.
9876 */
9877    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9878 if ( fNewNoRem
9879 && ( 0
9880#if 0 /* auto enable on first paged protected mode interrupt */
9881 || ( pOrgCtx->eflags.Bits.u1IF
9882 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9883 && TRPMHasTrap(pVCpu)
9884 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9885#endif
9886#if 0
9887 || ( pOrgCtx->cs == 0x10
9888 && ( pOrgCtx->rip == 0x90119e3e
9889 || pOrgCtx->rip == 0x901d9810)
9890#endif
9891#if 0 /* Auto enable DSL - FPU stuff. */
9892 || ( pOrgCtx->cs == 0x10
9893 && (// pOrgCtx->rip == 0xc02ec07f
9894 //|| pOrgCtx->rip == 0xc02ec082
9895 //|| pOrgCtx->rip == 0xc02ec0c9
9896 0
9897 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9898#endif
9899#if 0 /* Auto enable DSL - fstp st0 stuff. */
9900        || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9901#endif
9902#if 0
9903 || pOrgCtx->rip == 0x9022bb3a
9904#endif
9905#if 0
9906 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9907#endif
9908#if 0
9909 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9910 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9911#endif
9912#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9913 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9914 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9915 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9916#endif
9917#if 0 /* NT4SP1 - xadd early boot. */
9918 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9919#endif
9920#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9921 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9922#endif
9923#if 0 /* NT4SP1 - cmpxchg (AMD). */
9924 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9925#endif
9926#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9927 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9928#endif
9929#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9930 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9931
9932#endif
9933#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9934 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9935
9936#endif
9937#if 0 /* NT4SP1 - frstor [ecx] */
9938 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9939#endif
9940#if 0 /* xxxxxx - All long mode code. */
9941 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9942#endif
9943#if 0 /* rep movsq linux 3.7 64-bit boot. */
9944 || (pOrgCtx->rip == 0x0000000000100241)
9945#endif
9946#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9947 || (pOrgCtx->rip == 0x000000000215e240)
9948#endif
9949#if 0 /* DOS's size-overridden iret to v8086. */
9950 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9951#endif
9952 )
9953 )
9954 {
9955 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9956 RTLogFlags(NULL, "enabled");
9957 fNewNoRem = false;
9958 }
9959 if (fNewNoRem != pIemCpu->fNoRem)
9960 {
9961 pIemCpu->fNoRem = fNewNoRem;
9962 if (!fNewNoRem)
9963 {
9964 LogAlways(("Enabling verification mode!\n"));
9965 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9966 }
9967 else
9968 LogAlways(("Disabling verification mode!\n"));
9969 }
9970
9971 /*
9972 * Switch state.
9973 */
9974 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9975 {
9976 static CPUMCTX s_DebugCtx; /* Ugly! */
9977
9978 s_DebugCtx = *pOrgCtx;
9979 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9980 }
9981
9982 /*
9983 * See if there is an interrupt pending in TRPM and inject it if we can.
9984 */
9985 pIemCpu->uInjectCpl = UINT8_MAX;
9986 if ( pOrgCtx->eflags.Bits.u1IF
9987 && TRPMHasTrap(pVCpu)
9988 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9989 {
9990 uint8_t u8TrapNo;
9991 TRPMEVENT enmType;
9992 RTGCUINT uErrCode;
9993 RTGCPTR uCr2;
9994 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9995 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9996 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9997 TRPMResetTrap(pVCpu);
9998 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9999 }
10000
10001 /*
10002 * Reset the counters.
10003 */
10004 pIemCpu->cIOReads = 0;
10005 pIemCpu->cIOWrites = 0;
10006 pIemCpu->fIgnoreRaxRdx = false;
10007 pIemCpu->fOverlappingMovs = false;
10008 pIemCpu->fProblematicMemory = false;
10009 pIemCpu->fUndefinedEFlags = 0;
10010
10011 if (IEM_VERIFICATION_ENABLED(pIemCpu))
10012 {
10013 /*
10014 * Free all verification records.
10015 */
10016 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
10017 pIemCpu->pIemEvtRecHead = NULL;
10018 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
10019 do
10020 {
10021 while (pEvtRec)
10022 {
10023 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
10024 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
10025 pIemCpu->pFreeEvtRec = pEvtRec;
10026 pEvtRec = pNext;
10027 }
10028 pEvtRec = pIemCpu->pOtherEvtRecHead;
10029 pIemCpu->pOtherEvtRecHead = NULL;
10030 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
10031 } while (pEvtRec);
10032 }
10033}
10034
10035
10036/**
10037 * Allocates an event record.
10038 * @returns Pointer to a record, NULL if verification is disabled or the allocation fails.
10039 */
10040IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
10041{
10042 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10043 return NULL;
10044
10045 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
10046 if (pEvtRec)
10047 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
10048 else
10049 {
10050 if (!pIemCpu->ppIemEvtRecNext)
10051 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
10052
10053 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
10054 if (!pEvtRec)
10055 return NULL;
10056 }
10057 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
10058 pEvtRec->pNext = NULL;
10059 return pEvtRec;
10060}
10061
10062
10063/**
10064 * IOMMMIORead notification.
10065 */
10066VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
10067{
10068 PVMCPU pVCpu = VMMGetCpu(pVM);
10069 if (!pVCpu)
10070 return;
10071 PIEMCPU pIemCpu = &pVCpu->iem.s;
10072 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10073 if (!pEvtRec)
10074 return;
10075 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
10076 pEvtRec->u.RamRead.GCPhys = GCPhys;
10077 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
10078 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10079 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10080}
10081
10082
10083/**
10084 * IOMMMIOWrite notification.
10085 */
10086VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
10087{
10088 PVMCPU pVCpu = VMMGetCpu(pVM);
10089 if (!pVCpu)
10090 return;
10091 PIEMCPU pIemCpu = &pVCpu->iem.s;
10092 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10093 if (!pEvtRec)
10094 return;
10095 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
10096 pEvtRec->u.RamWrite.GCPhys = GCPhys;
10097 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
10098 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
10099 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
10100 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
10101 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
10102 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10103 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10104}
10105
10106
10107/**
10108 * IOMIOPortRead notification.
10109 */
10110VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
10111{
10112 PVMCPU pVCpu = VMMGetCpu(pVM);
10113 if (!pVCpu)
10114 return;
10115 PIEMCPU pIemCpu = &pVCpu->iem.s;
10116 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10117 if (!pEvtRec)
10118 return;
10119 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10120 pEvtRec->u.IOPortRead.Port = Port;
10121 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10122 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10123 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10124}
10125
10126/**
10127 * IOMIOPortWrite notification.
10128 */
10129VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10130{
10131 PVMCPU pVCpu = VMMGetCpu(pVM);
10132 if (!pVCpu)
10133 return;
10134 PIEMCPU pIemCpu = &pVCpu->iem.s;
10135 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10136 if (!pEvtRec)
10137 return;
10138 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10139 pEvtRec->u.IOPortWrite.Port = Port;
10140 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10141 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10142 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10143 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10144}
10145
10146
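/**
 * IOMIOPortReadString notification.
 */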
10147VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10148{
10149 PVMCPU pVCpu = VMMGetCpu(pVM);
10150 if (!pVCpu)
10151 return;
10152 PIEMCPU pIemCpu = &pVCpu->iem.s;
10153 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10154 if (!pEvtRec)
10155 return;
10156 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
10157 pEvtRec->u.IOPortStrRead.Port = Port;
10158 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
10159 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
10160 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10161 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10162}
10163
10164
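/**
 * IOMIOPortWriteString notification.
 */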
10165VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10166{
10167 PVMCPU pVCpu = VMMGetCpu(pVM);
10168 if (!pVCpu)
10169 return;
10170 PIEMCPU pIemCpu = &pVCpu->iem.s;
10171 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10172 if (!pEvtRec)
10173 return;
10174 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
10175 pEvtRec->u.IOPortStrWrite.Port = Port;
10176 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
10177 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
10178 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10179 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10180}
10181
10182
10183/**
10184 * Fakes and records an I/O port read.
10185 *
10186 * @returns VINF_SUCCESS.
10187 * @param pIemCpu The IEM per CPU data.
10188 * @param Port The I/O port.
10189 * @param pu32Value Where to store the fake value.
10190 * @param cbValue The size of the access.
10191 */
10192IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10193{
10194 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10195 if (pEvtRec)
10196 {
10197 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10198 pEvtRec->u.IOPortRead.Port = Port;
10199 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10200 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10201 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10202 }
10203 pIemCpu->cIOReads++;
10204 *pu32Value = 0xcccccccc;
10205 return VINF_SUCCESS;
10206}
10207
10208
10209/**
10210 * Fakes and records an I/O port write.
10211 *
10212 * @returns VINF_SUCCESS.
10213 * @param pIemCpu The IEM per CPU data.
10214 * @param Port The I/O port.
10215 * @param u32Value The value being written.
10216 * @param cbValue The size of the access.
10217 */
10218IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10219{
10220 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10221 if (pEvtRec)
10222 {
10223 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10224 pEvtRec->u.IOPortWrite.Port = Port;
10225 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10226 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10227 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10228 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10229 }
10230 pIemCpu->cIOWrites++;
10231 return VINF_SUCCESS;
10232}
10233
10234
10235/**
10236 * Used to add extra details about a stub case.
10237 * Used to add the current register state and disassembly details to an assertion message.
10238 */
10239IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10240{
10241 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10242 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10243 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10244 char szRegs[4096];
10245 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10246 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10247 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10248 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10249 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10250 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10251 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10252 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10253 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10254 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10255 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10256 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10257 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10258 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10259 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10260 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10261 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10262 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10263 " efer=%016VR{efer}\n"
10264 " pat=%016VR{pat}\n"
10265 " sf_mask=%016VR{sf_mask}\n"
10266 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10267 " lstar=%016VR{lstar}\n"
10268 " star=%016VR{star} cstar=%016VR{cstar}\n"
10269 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10270 );
10271
10272 char szInstr1[256];
10273 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10274 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10275 szInstr1, sizeof(szInstr1), NULL);
10276 char szInstr2[256];
10277 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10278 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10279 szInstr2, sizeof(szInstr2), NULL);
10280
10281 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10282}
10283
10284
10285/**
10286 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10287 * dump to the assertion info.
10288 *
10289 * @param pEvtRec The record to dump.
10290 */
10291IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10292{
10293 switch (pEvtRec->enmEvent)
10294 {
10295 case IEMVERIFYEVENT_IOPORT_READ:
10296 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10297                            pEvtRec->u.IOPortRead.Port,
10298                            pEvtRec->u.IOPortRead.cbValue);
10299 break;
10300 case IEMVERIFYEVENT_IOPORT_WRITE:
10301 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10302 pEvtRec->u.IOPortWrite.Port,
10303 pEvtRec->u.IOPortWrite.cbValue,
10304 pEvtRec->u.IOPortWrite.u32Value);
10305 break;
10306 case IEMVERIFYEVENT_IOPORT_STR_READ:
10307 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
10308                            pEvtRec->u.IOPortStrRead.Port,
10309                            pEvtRec->u.IOPortStrRead.cbValue,
10310                            pEvtRec->u.IOPortStrRead.cTransfers);
10311 break;
10312 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10313 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
10314 pEvtRec->u.IOPortStrWrite.Port,
10315 pEvtRec->u.IOPortStrWrite.cbValue,
10316 pEvtRec->u.IOPortStrWrite.cTransfers);
10317 break;
10318 case IEMVERIFYEVENT_RAM_READ:
10319 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10320 pEvtRec->u.RamRead.GCPhys,
10321 pEvtRec->u.RamRead.cb);
10322 break;
10323 case IEMVERIFYEVENT_RAM_WRITE:
10324 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10325 pEvtRec->u.RamWrite.GCPhys,
10326 pEvtRec->u.RamWrite.cb,
10327 (int)pEvtRec->u.RamWrite.cb,
10328 pEvtRec->u.RamWrite.ab);
10329 break;
10330 default:
10331 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10332 break;
10333 }
10334}
10335
10336
10337/**
10338 * Raises an assertion on the two specified records, showing the given message with
10339 * a record dump attached.
10340 *
10341 * @param pIemCpu The IEM per CPU data.
10342 * @param pEvtRec1 The first record.
10343 * @param pEvtRec2 The second record.
10344 * @param pszMsg The message explaining why we're asserting.
10345 */
10346IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10347{
10348 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10349 iemVerifyAssertAddRecordDump(pEvtRec1);
10350 iemVerifyAssertAddRecordDump(pEvtRec2);
10351 iemVerifyAssertMsg2(pIemCpu);
10352 RTAssertPanic();
10353}
10354
10355
10356/**
10357 * Raises an assertion on the specified record, showing the given message with
10358 * a record dump attached.
10359 *
10360 * @param pIemCpu The IEM per CPU data.
10361 * @param pEvtRec1 The first record.
10362 * @param pEvtRec The record to dump.
10363 */
10364IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10365{
10366 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10367 iemVerifyAssertAddRecordDump(pEvtRec);
10368 iemVerifyAssertMsg2(pIemCpu);
10369 RTAssertPanic();
10370}
10371
10372
10373/**
10374 * Verifies a write record.
10375 *
10376 * @param pIemCpu The IEM per CPU data.
10377 * @param pEvtRec The write record.
10378 * @param fRem Set if REM was doing the other execution; if clear,
10379 * it was HM.
10380 */
10381IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10382{
10383 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10384 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10385 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10386 if ( RT_FAILURE(rc)
10387 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10388 {
10389 /* fend off ins */
10390 if ( !pIemCpu->cIOReads
10391 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10392 || ( pEvtRec->u.RamWrite.cb != 1
10393 && pEvtRec->u.RamWrite.cb != 2
10394 && pEvtRec->u.RamWrite.cb != 4) )
10395 {
10396 /* fend off ROMs and MMIO */
10397 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10398 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10399 {
10400 /* fend off fxsave */
10401 if (pEvtRec->u.RamWrite.cb != 512)
10402 {
10403 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10404 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10405 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10406 RTAssertMsg2Add("%s: %.*Rhxs\n"
10407 "iem: %.*Rhxs\n",
10408 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10409 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10410 iemVerifyAssertAddRecordDump(pEvtRec);
10411 iemVerifyAssertMsg2(pIemCpu);
10412 RTAssertPanic();
10413 }
10414 }
10415 }
10416 }
10417
10418}
10419
10420/**
10421 * Performs the post-execution verification checks.
10422 */
10423IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10424{
10425 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10426 return;
10427
10428 /*
10429 * Switch back the state.
10430 */
10431 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10432 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10433 Assert(pOrgCtx != pDebugCtx);
10434 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10435
10436 /*
10437 * Execute the instruction in REM.
10438 */
10439 bool fRem = false;
10440 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10441 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10442 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10443#ifdef IEM_VERIFICATION_MODE_FULL_HM
10444 if ( HMIsEnabled(pVM)
10445 && pIemCpu->cIOReads == 0
10446 && pIemCpu->cIOWrites == 0
10447 && !pIemCpu->fProblematicMemory)
10448 {
10449 uint64_t uStartRip = pOrgCtx->rip;
10450 unsigned iLoops = 0;
10451 do
10452 {
10453 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10454 iLoops++;
10455 } while ( rc == VINF_SUCCESS
10456 || ( rc == VINF_EM_DBG_STEPPED
10457 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10458 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10459 || ( pOrgCtx->rip != pDebugCtx->rip
10460 && pIemCpu->uInjectCpl != UINT8_MAX
10461 && iLoops < 8) );
10462 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10463 rc = VINF_SUCCESS;
10464 }
10465#endif
10466 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10467 || rc == VINF_IOM_R3_IOPORT_READ
10468 || rc == VINF_IOM_R3_IOPORT_WRITE
10469 || rc == VINF_IOM_R3_MMIO_READ
10470 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10471 || rc == VINF_IOM_R3_MMIO_WRITE
10472 || rc == VINF_CPUM_R3_MSR_READ
10473 || rc == VINF_CPUM_R3_MSR_WRITE
10474 || rc == VINF_EM_RESCHEDULE
10475 )
10476 {
10477 EMRemLock(pVM);
10478 rc = REMR3EmulateInstruction(pVM, pVCpu);
10479 AssertRC(rc);
10480 EMRemUnlock(pVM);
10481 fRem = true;
10482 }
10483
10484 /*
10485 * Compare the register states.
10486 */
10487 unsigned cDiffs = 0;
10488 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10489 {
10490 //Log(("REM and IEM ends up with different registers!\n"));
10491 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10492
10493# define CHECK_FIELD(a_Field) \
10494 do \
10495 { \
10496 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10497 { \
10498 switch (sizeof(pOrgCtx->a_Field)) \
10499 { \
10500 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10501 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10502 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10503 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10504 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10505 } \
10506 cDiffs++; \
10507 } \
10508 } while (0)
10509# define CHECK_XSTATE_FIELD(a_Field) \
10510 do \
10511 { \
10512 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10513 { \
10514 switch (sizeof(pOrgXState->a_Field)) \
10515 { \
10516 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10517 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10518 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10519 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10520 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10521 } \
10522 cDiffs++; \
10523 } \
10524 } while (0)
10525
10526# define CHECK_BIT_FIELD(a_Field) \
10527 do \
10528 { \
10529 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10530 { \
10531 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10532 cDiffs++; \
10533 } \
10534 } while (0)
10535
10536# define CHECK_SEL(a_Sel) \
10537 do \
10538 { \
10539 CHECK_FIELD(a_Sel.Sel); \
10540 CHECK_FIELD(a_Sel.Attr.u); \
10541 CHECK_FIELD(a_Sel.u64Base); \
10542 CHECK_FIELD(a_Sel.u32Limit); \
10543 CHECK_FIELD(a_Sel.fFlags); \
10544 } while (0)
10545
10546 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10547 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10548
10549#if 1 /* The recompiler doesn't update these the intel way. */
10550 if (fRem)
10551 {
10552 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10553 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10554 pOrgXState->x87.CS = pDebugXState->x87.CS;
10555 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10556 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10557 pOrgXState->x87.DS = pDebugXState->x87.DS;
10558 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10559 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10560 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10561 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10562 }
10563#endif
10564 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10565 {
10566 RTAssertMsg2Weak(" the FPU state differs\n");
10567 cDiffs++;
10568 CHECK_XSTATE_FIELD(x87.FCW);
10569 CHECK_XSTATE_FIELD(x87.FSW);
10570 CHECK_XSTATE_FIELD(x87.FTW);
10571 CHECK_XSTATE_FIELD(x87.FOP);
10572 CHECK_XSTATE_FIELD(x87.FPUIP);
10573 CHECK_XSTATE_FIELD(x87.CS);
10574 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10575 CHECK_XSTATE_FIELD(x87.FPUDP);
10576 CHECK_XSTATE_FIELD(x87.DS);
10577 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10578 CHECK_XSTATE_FIELD(x87.MXCSR);
10579 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10580 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10581 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10582 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10583 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10584 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10585 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10586 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10587 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10588 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10589 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10590 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10591 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10592 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10593 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10594 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10595 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10596 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10597 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10598 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10599 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10600 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10601 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10602 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10603 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10604 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10605 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10606 }
10607 CHECK_FIELD(rip);
10608 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10609 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10610 {
10611 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10612 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10613 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10614 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10615 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10616 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10617 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10618 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10619 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10620 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10621 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10622 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10623 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10624 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10625 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10626 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10627        if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
10628 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10629 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10630 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10631 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10632 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10633 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10634 }
10635
10636 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10637 CHECK_FIELD(rax);
10638 CHECK_FIELD(rcx);
10639 if (!pIemCpu->fIgnoreRaxRdx)
10640 CHECK_FIELD(rdx);
10641 CHECK_FIELD(rbx);
10642 CHECK_FIELD(rsp);
10643 CHECK_FIELD(rbp);
10644 CHECK_FIELD(rsi);
10645 CHECK_FIELD(rdi);
10646 CHECK_FIELD(r8);
10647 CHECK_FIELD(r9);
10648 CHECK_FIELD(r10);
10649 CHECK_FIELD(r11);
10650 CHECK_FIELD(r12);
10651 CHECK_FIELD(r13);
10652 CHECK_SEL(cs);
10653 CHECK_SEL(ss);
10654 CHECK_SEL(ds);
10655 CHECK_SEL(es);
10656 CHECK_SEL(fs);
10657 CHECK_SEL(gs);
10658 CHECK_FIELD(cr0);
10659
10660        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10661 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10662        /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
10663 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10664 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10665 {
10666 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10667 { /* ignore */ }
10668 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10669 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10670 && fRem)
10671 { /* ignore */ }
10672 else
10673 CHECK_FIELD(cr2);
10674 }
10675 CHECK_FIELD(cr3);
10676 CHECK_FIELD(cr4);
10677 CHECK_FIELD(dr[0]);
10678 CHECK_FIELD(dr[1]);
10679 CHECK_FIELD(dr[2]);
10680 CHECK_FIELD(dr[3]);
10681 CHECK_FIELD(dr[6]);
10682 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10683 CHECK_FIELD(dr[7]);
10684 CHECK_FIELD(gdtr.cbGdt);
10685 CHECK_FIELD(gdtr.pGdt);
10686 CHECK_FIELD(idtr.cbIdt);
10687 CHECK_FIELD(idtr.pIdt);
10688 CHECK_SEL(ldtr);
10689 CHECK_SEL(tr);
10690 CHECK_FIELD(SysEnter.cs);
10691 CHECK_FIELD(SysEnter.eip);
10692 CHECK_FIELD(SysEnter.esp);
10693 CHECK_FIELD(msrEFER);
10694 CHECK_FIELD(msrSTAR);
10695 CHECK_FIELD(msrPAT);
10696 CHECK_FIELD(msrLSTAR);
10697 CHECK_FIELD(msrCSTAR);
10698 CHECK_FIELD(msrSFMASK);
10699 CHECK_FIELD(msrKERNELGSBASE);
10700
10701 if (cDiffs != 0)
10702 {
10703 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10704 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10705 iemVerifyAssertMsg2(pIemCpu);
10706 RTAssertPanic();
10707 }
10708# undef CHECK_FIELD
10709# undef CHECK_BIT_FIELD
10710 }
10711
10712 /*
10713 * If the register state compared fine, check the verification event
10714 * records.
10715 */
10716 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10717 {
10718 /*
10719 * Compare verification event records.
10720 * - I/O port accesses should be a 1:1 match.
10721 */
10722 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10723 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10724 while (pIemRec && pOtherRec)
10725 {
10726            /* Since we might miss RAM writes and reads, ignore reads and check
10727               that any extra written memory matches what is actually in guest RAM. */
10728 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10729 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10730 && pIemRec->pNext)
10731 {
10732 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10733 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10734 pIemRec = pIemRec->pNext;
10735 }
10736
10737 /* Do the compare. */
10738 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10739 {
10740 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10741 break;
10742 }
10743 bool fEquals;
10744 switch (pIemRec->enmEvent)
10745 {
10746 case IEMVERIFYEVENT_IOPORT_READ:
10747 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10748 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10749 break;
10750 case IEMVERIFYEVENT_IOPORT_WRITE:
10751 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10752 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10753 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10754 break;
10755 case IEMVERIFYEVENT_IOPORT_STR_READ:
10756 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
10757 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
10758 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
10759 break;
10760 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10761 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
10762 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
10763 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
10764 break;
10765 case IEMVERIFYEVENT_RAM_READ:
10766 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10767 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10768 break;
10769 case IEMVERIFYEVENT_RAM_WRITE:
10770 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10771 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10772 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10773 break;
10774 default:
10775 fEquals = false;
10776 break;
10777 }
10778 if (!fEquals)
10779 {
10780 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10781 break;
10782 }
10783
10784 /* advance */
10785 pIemRec = pIemRec->pNext;
10786 pOtherRec = pOtherRec->pNext;
10787 }
10788
10789 /* Ignore extra writes and reads. */
10790 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10791 {
10792 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10793 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10794 pIemRec = pIemRec->pNext;
10795 }
10796 if (pIemRec != NULL)
10797 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10798 else if (pOtherRec != NULL)
10799 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10800 }
10801 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10802}
10803
10804#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10805
10806/* stubs */
10807IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10808{
10809 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10810 return VERR_INTERNAL_ERROR;
10811}
10812
10813IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10814{
10815 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10816 return VERR_INTERNAL_ERROR;
10817}
10818
10819#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10820
10821
10822#ifdef LOG_ENABLED
10823/**
10824 * Logs the current instruction.
10825 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10826 * @param pCtx The current CPU context.
10827 * @param fSameCtx Set if we have the same context information as the VMM,
10828 * clear if we may have already executed an instruction in
10829 * our debug context. When clear, we assume IEMCPU holds
10830 * valid CPU mode info.
10831 */
10832IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10833{
10834# ifdef IN_RING3
10835 if (LogIs2Enabled())
10836 {
10837 char szInstr[256];
10838 uint32_t cbInstr = 0;
10839 if (fSameCtx)
10840 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10841 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10842 szInstr, sizeof(szInstr), &cbInstr);
10843 else
10844 {
10845 uint32_t fFlags = 0;
10846 switch (pVCpu->iem.s.enmCpuMode)
10847 {
10848 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10849 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10850 case IEMMODE_16BIT:
10851 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10852 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10853 else
10854 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10855 break;
10856 }
10857 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10858 szInstr, sizeof(szInstr), &cbInstr);
10859 }
10860
10861 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10862 Log2(("****\n"
10863 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10864 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10865 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10866 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10867 " %s\n"
10868 ,
10869 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10870 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10871 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10872 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10873 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10874 szInstr));
10875
10876 if (LogIs3Enabled())
10877 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10878 }
10879 else
10880# endif
10881 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10882 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10883}
10884#endif
10885
10886
10887/**
10888 * Makes status code adjustments (pass up from I/O and access handler)
10889 * as well as maintaining statistics.
10890 *
10891 * @returns Strict VBox status code to pass up.
10892 * @param pIemCpu The IEM per CPU data.
10893 * @param rcStrict The status from executing an instruction.
10894 */
10895DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10896{
10897 if (rcStrict != VINF_SUCCESS)
10898 {
10899 if (RT_SUCCESS(rcStrict))
10900 {
10901 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10902 || rcStrict == VINF_IOM_R3_IOPORT_READ
10903 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10904 || rcStrict == VINF_IOM_R3_MMIO_READ
10905 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10906 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10907 || rcStrict == VINF_CPUM_R3_MSR_READ
10908 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10909 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10910 || rcStrict == VINF_EM_RAW_TO_R3
10911 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10912 /* raw-mode / virt handlers only: */
10913 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10914 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10915 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10916 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10917 || rcStrict == VINF_SELM_SYNC_GDT
10918 || rcStrict == VINF_CSAM_PENDING_ACTION
10919 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10920 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10921/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10922 int32_t const rcPassUp = pIemCpu->rcPassUp;
10923 if (rcPassUp == VINF_SUCCESS)
10924 pIemCpu->cRetInfStatuses++;
10925 else if ( rcPassUp < VINF_EM_FIRST
10926 || rcPassUp > VINF_EM_LAST
10927 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10928 {
10929 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10930 pIemCpu->cRetPassUpStatus++;
10931 rcStrict = rcPassUp;
10932 }
10933 else
10934 {
10935 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10936 pIemCpu->cRetInfStatuses++;
10937 }
10938 }
10939 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10940 pIemCpu->cRetAspectNotImplemented++;
10941 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10942 pIemCpu->cRetInstrNotImplemented++;
10943#ifdef IEM_VERIFICATION_MODE_FULL
10944 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10945 rcStrict = VINF_SUCCESS;
10946#endif
10947 else
10948 pIemCpu->cRetErrStatuses++;
10949 }
10950 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10951 {
10952 pIemCpu->cRetPassUpStatus++;
10953 rcStrict = pIemCpu->rcPassUp;
10954 }
10955
10956 return rcStrict;
10957}
10958
10959
10960/**
10961 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10962 * IEMExecOneWithPrefetchedByPC.
10963 *
10964 * @return Strict VBox status code.
10965 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10966 * @param pIemCpu The IEM per CPU data.
10967 * @param fExecuteInhibit If set, execute the instruction following CLI,
10968 * POP SS and MOV SS,GR.
10969 */
10970DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10971{
10972 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10973 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10974 if (rcStrict == VINF_SUCCESS)
10975 pIemCpu->cInstructions++;
10976 if (pIemCpu->cActiveMappings > 0)
10977 iemMemRollback(pIemCpu);
10978//#ifdef DEBUG
10979// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10980//#endif
10981
10982 /* Execute the next instruction as well if a cli, pop ss or
10983 mov ss, Gr has just completed successfully. */
10984 if ( fExecuteInhibit
10985 && rcStrict == VINF_SUCCESS
10986 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10987 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10988 {
10989 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10990 if (rcStrict == VINF_SUCCESS)
10991 {
10992# ifdef LOG_ENABLED
10993 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10994# endif
10995 IEM_OPCODE_GET_NEXT_U8(&b);
10996 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10997 if (rcStrict == VINF_SUCCESS)
10998 pIemCpu->cInstructions++;
10999 if (pIemCpu->cActiveMappings > 0)
11000 iemMemRollback(pIemCpu);
11001 }
11002 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
11003 }
11004
11005 /*
11006 * Return value fiddling, statistics and sanity assertions.
11007 */
11008 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11009
11010 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
11011 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
11012#if defined(IEM_VERIFICATION_MODE_FULL)
11013 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
11014 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
11015 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
11016 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
11017#endif
11018 return rcStrict;
11019}
11020
11021
11022#ifdef IN_RC
11023/**
11024 * Re-enters raw-mode or ensures we return to ring-3.
11025 *
11026 * @returns rcStrict, maybe modified.
11027 * @param pIemCpu The IEM CPU structure.
11028 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11029 * @param pCtx The current CPU context.
11030 * @param rcStrict The status code returned by the interpreter.
11031 */
11032DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
11033{
11034 if (!pIemCpu->fInPatchCode)
11035 CPUMRawEnter(pVCpu);
11036 return rcStrict;
11037}
11038#endif
11039
11040
11041/**
11042 * Execute one instruction.
11043 *
11044 * @return Strict VBox status code.
11045 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11046 */
11047VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
11048{
11049 PIEMCPU pIemCpu = &pVCpu->iem.s;
11050
11051#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11052 iemExecVerificationModeSetup(pIemCpu);
11053#endif
11054#ifdef LOG_ENABLED
11055 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11056 iemLogCurInstr(pVCpu, pCtx, true);
11057#endif
11058
11059 /*
11060 * Do the decoding and emulation.
11061 */
11062 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11063 if (rcStrict == VINF_SUCCESS)
11064 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11065
11066#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11067 /*
11068 * Assert some sanity.
11069 */
11070 iemExecVerificationModeCheck(pIemCpu);
11071#endif
11072#ifdef IN_RC
11073 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11074#endif
11075 if (rcStrict != VINF_SUCCESS)
11076 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11077 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11078 return rcStrict;
11079}
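/*
 * Hedged usage sketch (not part of the original file): roughly how a caller
 * such as EM might fall back to IEM for a single instruction.  The function
 * name and the logging are illustrative assumptions only.
 */
#if 0
static VBOXSTRICTRC emR3SketchInterpretOneInstruction(PVMCPU pVCpu)
{
    /* Emulate exactly one guest instruction; any status other than
       VINF_SUCCESS is simply handed back to the execution loop. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("emR3SketchInterpretOneInstruction: rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif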
11080
11081
11082VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11083{
11084 PIEMCPU pIemCpu = &pVCpu->iem.s;
11085 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11086 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11087
11088 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11089 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11090 if (rcStrict == VINF_SUCCESS)
11091 {
11092 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11093 if (pcbWritten)
11094 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11095 }
11096
11097#ifdef IN_RC
11098 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11099#endif
11100 return rcStrict;
11101}
11102
11103
11104VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11105 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11106{
11107 PIEMCPU pIemCpu = &pVCpu->iem.s;
11108 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11109 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11110
11111 VBOXSTRICTRC rcStrict;
11112 if ( cbOpcodeBytes
11113 && pCtx->rip == OpcodeBytesPC)
11114 {
11115 iemInitDecoder(pIemCpu, false);
11116 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11117 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11118 rcStrict = VINF_SUCCESS;
11119 }
11120 else
11121 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11122 if (rcStrict == VINF_SUCCESS)
11123 {
11124 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11125 }
11126
11127#ifdef IN_RC
11128 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11129#endif
11130 return rcStrict;
11131}
11132
11133
11134VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11135{
11136 PIEMCPU pIemCpu = &pVCpu->iem.s;
11137 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11138 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11139
11140 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11141 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11142 if (rcStrict == VINF_SUCCESS)
11143 {
11144 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11145 if (pcbWritten)
11146 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11147 }
11148
11149#ifdef IN_RC
11150 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11151#endif
11152 return rcStrict;
11153}
11154
11155
11156VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11157 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11158{
11159 PIEMCPU pIemCpu = &pVCpu->iem.s;
11160 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11161 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11162
11163 VBOXSTRICTRC rcStrict;
11164 if ( cbOpcodeBytes
11165 && pCtx->rip == OpcodeBytesPC)
11166 {
11167 iemInitDecoder(pIemCpu, true);
11168 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11169 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11170 rcStrict = VINF_SUCCESS;
11171 }
11172 else
11173 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11174 if (rcStrict == VINF_SUCCESS)
11175 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11176
11177#ifdef IN_RC
11178 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11179#endif
11180 return rcStrict;
11181}
11182
11183
11184VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11185{
11186 PIEMCPU pIemCpu = &pVCpu->iem.s;
11187
11188 /*
11189 * See if there is an interrupt pending in TRPM and inject it if we can.
11190 */
11191#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11192 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11193# ifdef IEM_VERIFICATION_MODE_FULL
11194 pIemCpu->uInjectCpl = UINT8_MAX;
11195# endif
11196 if ( pCtx->eflags.Bits.u1IF
11197 && TRPMHasTrap(pVCpu)
11198 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11199 {
11200 uint8_t u8TrapNo;
11201 TRPMEVENT enmType;
11202 RTGCUINT uErrCode;
11203 RTGCPTR uCr2;
11204 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11205 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11206 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11207 TRPMResetTrap(pVCpu);
11208 }
11209#else
11210 iemExecVerificationModeSetup(pIemCpu);
11211 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11212#endif
11213
11214 /*
11215 * Log the state.
11216 */
11217#ifdef LOG_ENABLED
11218 iemLogCurInstr(pVCpu, pCtx, true);
11219#endif
11220
11221 /*
11222 * Do the decoding and emulation.
11223 */
11224 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11225 if (rcStrict == VINF_SUCCESS)
11226 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11227
11228#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11229 /*
11230 * Assert some sanity.
11231 */
11232 iemExecVerificationModeCheck(pIemCpu);
11233#endif
11234
11235 /*
11236 * Maybe re-enter raw-mode and log.
11237 */
11238#ifdef IN_RC
11239 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11240#endif
11241 if (rcStrict != VINF_SUCCESS)
11242 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11243 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11244 return rcStrict;
11245}
11246
11247
11248
11249/**
11250 * Injects a trap, fault, abort, software interrupt or external interrupt.
11251 *
11252 * The parameter list matches TRPMQueryTrapAll pretty closely.
11253 *
11254 * @returns Strict VBox status code.
11255 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11256 * @param u8TrapNo The trap number.
11257 * @param enmType What type is it (trap/fault/abort), software
11258 * interrupt or hardware interrupt.
11259 * @param uErrCode The error code if applicable.
11260 * @param uCr2 The CR2 value if applicable.
11261 * @param cbInstr The instruction length (only relevant for
11262 * software interrupts).
11263 */
11264VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11265 uint8_t cbInstr)
11266{
11267 iemInitDecoder(&pVCpu->iem.s, false);
11268#ifdef DBGFTRACE_ENABLED
11269 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11270 u8TrapNo, enmType, uErrCode, uCr2);
11271#endif
11272
11273 uint32_t fFlags;
11274 switch (enmType)
11275 {
11276 case TRPM_HARDWARE_INT:
11277 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11278 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11279 uErrCode = uCr2 = 0;
11280 break;
11281
11282 case TRPM_SOFTWARE_INT:
11283 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11284 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11285 uErrCode = uCr2 = 0;
11286 break;
11287
11288 case TRPM_TRAP:
11289 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11290 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11291 if (u8TrapNo == X86_XCPT_PF)
11292 fFlags |= IEM_XCPT_FLAGS_CR2;
11293 switch (u8TrapNo)
11294 {
11295 case X86_XCPT_DF:
11296 case X86_XCPT_TS:
11297 case X86_XCPT_NP:
11298 case X86_XCPT_SS:
11299 case X86_XCPT_PF:
11300 case X86_XCPT_AC:
11301 fFlags |= IEM_XCPT_FLAGS_ERR;
11302 break;
11303
11304 case X86_XCPT_NMI:
11305 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11306 break;
11307 }
11308 break;
11309
11310 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11311 }
11312
11313 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11314}
11315
11316
11317/**
11318 * Injects the active TRPM event.
11319 *
11320 * @returns Strict VBox status code.
11321 * @param pVCpu The cross context virtual CPU structure.
11322 */
11323VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11324{
11325#ifndef IEM_IMPLEMENTS_TASKSWITCH
11326 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11327#else
11328 uint8_t u8TrapNo;
11329 TRPMEVENT enmType;
11330 RTGCUINT uErrCode;
11331 RTGCUINTPTR uCr2;
11332 uint8_t cbInstr;
11333 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11334 if (RT_FAILURE(rc))
11335 return rc;
11336
11337 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11338
11339 /** @todo Are there any other codes that imply the event was successfully
11340 * delivered to the guest? See @bugref{6607}. */
11341 if ( rcStrict == VINF_SUCCESS
11342 || rcStrict == VINF_IEM_RAISED_XCPT)
11343 {
11344 TRPMResetTrap(pVCpu);
11345 }
11346 return rcStrict;
11347#endif
11348}
11349
11350
11351VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11352{
11353 return VERR_NOT_IMPLEMENTED;
11354}
11355
11356
11357VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11358{
11359 return VERR_NOT_IMPLEMENTED;
11360}
11361
11362
11363#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11364/**
11365 * Executes a IRET instruction with default operand size.
11366 *
11367 * This is for PATM.
11368 *
11369 * @returns VBox status code.
11370 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11371 * @param pCtxCore The register frame.
11372 */
11373VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11374{
11375 PIEMCPU pIemCpu = &pVCpu->iem.s;
11376 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11377
11378 iemCtxCoreToCtx(pCtx, pCtxCore);
11379 iemInitDecoder(pIemCpu);
11380 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11381 if (rcStrict == VINF_SUCCESS)
11382 iemCtxToCtxCore(pCtxCore, pCtx);
11383 else
11384 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11385 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11386 return rcStrict;
11387}
11388#endif
11389
11390
11391/**
11392 * Macro used by the IEMExec* methods to check the given instruction length.
11393 *
11394 * Will return on failure!
11395 *
11396 * @param a_cbInstr The given instruction length.
11397 * @param a_cbMin The minimum length.
11398 */
11399#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11400 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11401 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
11402
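/*
 * Illustrative note (not in the original source): the unsigned subtraction in
 * the macro above is a branch-free range check accepting a_cbMin..15, where 15
 * is the architectural maximum x86 instruction length.  A hedged sketch with a
 * hypothetical caller and a_cbMin = 2:
 */
#if 0
static VBOXSTRICTRC iemSketchCheckInstrLen(uint8_t cbInstr)
{
    /* cbInstr = 0 or 1 wraps around to a huge unsigned value and fails the
       check; 2..15 passes; 16 and above fails again. */
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    return VINF_SUCCESS;
}
#endif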
11403
11404/**
11405 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11406 *
11407 * This API ASSUMES that the caller has already verified that the guest code is
11408 * allowed to access the I/O port. (The I/O port is in the DX register in the
11409 * guest state.)
11410 *
11411 * @returns Strict VBox status code.
11412 * @param pVCpu The cross context virtual CPU structure.
11413 * @param cbValue The size of the I/O port access (1, 2, or 4).
11414 * @param enmAddrMode The addressing mode.
11415 * @param fRepPrefix Indicates whether a repeat prefix is used
11416 * (doesn't matter which for this instruction).
11417 * @param cbInstr The instruction length in bytes.
11418 * @param iEffSeg The effective segment register number.
11419 */
11420VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11421 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11422{
11423 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11424 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11425
11426 /*
11427 * State init.
11428 */
11429 PIEMCPU pIemCpu = &pVCpu->iem.s;
11430 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11431
11432 /*
11433 * Switch orgy for getting to the right handler.
11434 */
11435 VBOXSTRICTRC rcStrict;
11436 if (fRepPrefix)
11437 {
11438 switch (enmAddrMode)
11439 {
11440 case IEMMODE_16BIT:
11441 switch (cbValue)
11442 {
11443 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11444 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11445 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11446 default:
11447 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11448 }
11449 break;
11450
11451 case IEMMODE_32BIT:
11452 switch (cbValue)
11453 {
11454 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11455 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11456 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11457 default:
11458 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11459 }
11460 break;
11461
11462 case IEMMODE_64BIT:
11463 switch (cbValue)
11464 {
11465 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11466 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11467 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11468 default:
11469 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11470 }
11471 break;
11472
11473 default:
11474 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11475 }
11476 }
11477 else
11478 {
11479 switch (enmAddrMode)
11480 {
11481 case IEMMODE_16BIT:
11482 switch (cbValue)
11483 {
11484 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11485 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11486 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11487 default:
11488 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11489 }
11490 break;
11491
11492 case IEMMODE_32BIT:
11493 switch (cbValue)
11494 {
11495 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11496 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11497 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11498 default:
11499 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11500 }
11501 break;
11502
11503 case IEMMODE_64BIT:
11504 switch (cbValue)
11505 {
11506 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11507 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11508 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11509 default:
11510 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11511 }
11512 break;
11513
11514 default:
11515 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11516 }
11517 }
11518
11519 iemUninitExec(pIemCpu);
11520 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11521}
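/*
 * Hedged usage sketch (not part of the original file): roughly how a VT-x or
 * AMD-V exit handler might forward a 'rep outsb' intercept here once it has
 * performed the I/O permission checks itself.  The function name and the
 * fixed parameter values are illustrative assumptions.
 */
#if 0
static VBOXSTRICTRC hmSketchHandleRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* Byte-sized transfers, 16-bit addressing, REP prefix, DS as the
       effective segment. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT,
                                true /*fRepPrefix*/, cbInstr, X86_SREG_DS);
}
#endif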
11522
11523
11524/**
11525 * Interface for HM and EM for executing string I/O IN (read) instructions.
11526 *
11527 * This API ASSUMES that the caller has already verified that the guest code is
11528 * allowed to access the I/O port. (The I/O port is in the DX register in the
11529 * guest state.)
11530 *
11531 * @returns Strict VBox status code.
11532 * @param pVCpu The cross context virtual CPU structure.
11533 * @param cbValue The size of the I/O port access (1, 2, or 4).
11534 * @param enmAddrMode The addressing mode.
11535 * @param fRepPrefix Indicates whether a repeat prefix is used
11536 * (doesn't matter which for this instruction).
11537 * @param cbInstr The instruction length in bytes.
11538 */
11539VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11540 bool fRepPrefix, uint8_t cbInstr)
11541{
11542 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11543
11544 /*
11545 * State init.
11546 */
11547 PIEMCPU pIemCpu = &pVCpu->iem.s;
11548 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11549
11550 /*
11551 * Switch orgy for getting to the right handler.
11552 */
11553 VBOXSTRICTRC rcStrict;
11554 if (fRepPrefix)
11555 {
11556 switch (enmAddrMode)
11557 {
11558 case IEMMODE_16BIT:
11559 switch (cbValue)
11560 {
11561 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11562 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11563 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11564 default:
11565 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11566 }
11567 break;
11568
11569 case IEMMODE_32BIT:
11570 switch (cbValue)
11571 {
11572 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11573 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11574 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11575 default:
11576 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11577 }
11578 break;
11579
11580 case IEMMODE_64BIT:
11581 switch (cbValue)
11582 {
11583 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11584 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11585 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11586 default:
11587 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11588 }
11589 break;
11590
11591 default:
11592 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11593 }
11594 }
11595 else
11596 {
11597 switch (enmAddrMode)
11598 {
11599 case IEMMODE_16BIT:
11600 switch (cbValue)
11601 {
11602 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11603 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11604 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11605 default:
11606 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11607 }
11608 break;
11609
11610 case IEMMODE_32BIT:
11611 switch (cbValue)
11612 {
11613 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11614 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11615 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11616 default:
11617 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11618 }
11619 break;
11620
11621 case IEMMODE_64BIT:
11622 switch (cbValue)
11623 {
11624 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11625 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11626 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11627 default:
11628 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11629 }
11630 break;
11631
11632 default:
11633 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11634 }
11635 }
11636
11637 iemUninitExec(pIemCpu);
11638 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11639}
11640
11641
11642
11643/**
11644 * Interface for HM and EM to write to a CRx register.
11645 *
11646 * @returns Strict VBox status code.
11647 * @param pVCpu The cross context virtual CPU structure.
11648 * @param cbInstr The instruction length in bytes.
11649 * @param iCrReg The control register number (destination).
11650 * @param iGReg The general purpose register number (source).
11651 *
11652 * @remarks In ring-0 not all of the state needs to be synced in.
11653 */
11654VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11655{
11656 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11657 Assert(iCrReg < 16);
11658 Assert(iGReg < 16);
11659
11660 PIEMCPU pIemCpu = &pVCpu->iem.s;
11661 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11662 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11663 iemUninitExec(pIemCpu);
11664 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11665}
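/*
 * Hedged usage sketch (not part of the original file): forwarding a decoded
 * 'mov cr3, rax' intercept.  The 3-byte instruction length and the register
 * choice are example values only.
 */
#if 0
static VBOXSTRICTRC hmSketchHandleMovToCr3(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg: CR3*/, X86_GREG_xAX);
}
#endif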
11666
11667
11668/**
11669 * Interface for HM and EM to read from a CRx register.
11670 *
11671 * @returns Strict VBox status code.
11672 * @param pVCpu The cross context virtual CPU structure.
11673 * @param cbInstr The instruction length in bytes.
11674 * @param iGReg The general purpose register number (destination).
11675 * @param iCrReg The control register number (source).
11676 *
11677 * @remarks In ring-0 not all of the state needs to be synced in.
11678 */
11679VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11680{
11681 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11682 Assert(iCrReg < 16);
11683 Assert(iGReg < 16);
11684
11685 PIEMCPU pIemCpu = &pVCpu->iem.s;
11686 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11687 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11688 iemUninitExec(pIemCpu);
11689 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11690}
11691
11692
11693/**
11694 * Interface for HM and EM to clear the CR0[TS] bit.
11695 *
11696 * @returns Strict VBox status code.
11697 * @param pVCpu The cross context virtual CPU structure.
11698 * @param cbInstr The instruction length in bytes.
11699 *
11700 * @remarks In ring-0 not all of the state needs to be synced in.
11701 */
11702VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11703{
11704 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11705
11706 PIEMCPU pIemCpu = &pVCpu->iem.s;
11707 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11708 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11709 iemUninitExec(pIemCpu);
11710 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11711}
11712
11713
11714/**
11715 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11716 *
11717 * @returns Strict VBox status code.
11718 * @param pVCpu The cross context virtual CPU structure.
11719 * @param cbInstr The instruction length in bytes.
11720 * @param uValue The value to load into CR0.
11721 *
11722 * @remarks In ring-0 not all of the state needs to be synced in.
11723 */
11724VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11725{
11726 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11727
11728 PIEMCPU pIemCpu = &pVCpu->iem.s;
11729 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11730 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11731 iemUninitExec(pIemCpu);
11732 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11733}
11734
11735
11736/**
11737 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11738 *
11739 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11740 *
11741 * @returns Strict VBox status code.
11742 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11743 * @param cbInstr The instruction length in bytes.
11744 * @remarks In ring-0 not all of the state needs to be synced in.
11745 * @thread EMT(pVCpu)
11746 */
11747VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11748{
11749 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11750
11751 PIEMCPU pIemCpu = &pVCpu->iem.s;
11752 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11753 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11754 iemUninitExec(pIemCpu);
11755 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11756}
11757
11758#ifdef IN_RING3
11759
11760/**
11761 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11762 *
11763 * @returns Merge between @a rcStrict and what the commit operation returned.
11764 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11765 * @param rcStrict The status code returned by ring-0 or raw-mode.
11766 */
11767VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11768{
11769 PIEMCPU pIemCpu = &pVCpu->iem.s;
11770
11771 /*
11772 * Retrieve and reset the pending commit.
11773 */
11774 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11775 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11776 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11777
11778 /*
11779 * Must reset pass-up status code.
11780 */
11781 pIemCpu->rcPassUp = VINF_SUCCESS;
11782
11783 /*
11784 * Call the function. Currently using switch here instead of function
11785 * pointer table as a switch won't get skewed.
11786 */
11787 VBOXSTRICTRC rcStrictCommit;
11788 switch (enmFn)
11789 {
11790 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11791 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11792 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11793 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11794 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11795 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11796 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11797 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11798 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11799 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11800 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11801 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11802 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11803 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11804 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11805 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11806 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11807 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11808 default:
11809 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11810 }
11811
11812 /*
11813 * Merge the status code (if any) with the incoming one.
11814 */
11815 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11816 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11817 return rcStrict;
11818 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11819 return rcStrictCommit;
11820
11821 /* Complicated. */
11822 if (RT_FAILURE(rcStrict))
11823 return rcStrict;
11824 if (RT_FAILURE(rcStrictCommit))
11825 return rcStrictCommit;
11826 if ( rcStrict >= VINF_EM_FIRST
11827 && rcStrict <= VINF_EM_LAST)
11828 {
11829 if ( rcStrictCommit >= VINF_EM_FIRST
11830 && rcStrictCommit <= VINF_EM_LAST)
11831 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11832
11833 /* This really shouldn't happen. Check PGM + handler code! */
11834 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11835 }
11836 /* This shouldn't really happen either, see IOM_SUCCESS. */
11837 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11838}
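/*
 * Hedged usage sketch (not part of the original file): how a ring-3 force-flag
 * handler might complete a pending IEM commit after returning from ring-0.
 * The wrapper name and the surrounding loop are assumptions for illustration.
 */
# if 0
static VBOXSTRICTRC emR3SketchHandleIemForceFlag(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3DoPendingAction(pVCpu, rcStrict);
    return rcStrict;
}
# endif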
11839
11840#endif /* IN_RING3 */
11841