VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 60307

Last change on this file since 60307 was 60189, checked in by vboxsync, 9 years ago

iemRaiseLoadStackFromTss32Or16: Try make GCC happy.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 447.4 KB
1/* $Id: IEMAll.cpp 60189 2016-03-24 20:02:47Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with the speed goal, as the disassembler chews on things a bit too
37 * much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 *
72 */
73
74/** @def IEM_VERIFICATION_MODE_MINIMAL
75 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
76 * context. */
77#if defined(DOXYGEN_RUNNING)
78# define IEM_VERIFICATION_MODE_MINIMAL
79#endif
80//#define IEM_LOG_MEMORY_WRITES
81#define IEM_IMPLEMENTS_TASKSWITCH
82
83
84/*********************************************************************************************************************************
85* Header Files *
86*********************************************************************************************************************************/
87#define LOG_GROUP LOG_GROUP_IEM
88#include <VBox/vmm/iem.h>
89#include <VBox/vmm/cpum.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <internal/pgm.h>
93#include <VBox/vmm/iom.h>
94#include <VBox/vmm/em.h>
95#include <VBox/vmm/hm.h>
96#include <VBox/vmm/tm.h>
97#include <VBox/vmm/dbgf.h>
98#include <VBox/vmm/dbgftrace.h>
99#ifdef VBOX_WITH_RAW_MODE_NOT_R0
100# include <VBox/vmm/patm.h>
101# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
102# include <VBox/vmm/csam.h>
103# endif
104#endif
105#include "IEMInternal.h"
106#ifdef IEM_VERIFICATION_MODE_FULL
107# include <VBox/vmm/rem.h>
108# include <VBox/vmm/mm.h>
109#endif
110#include <VBox/vmm/vm.h>
111#include <VBox/log.h>
112#include <VBox/err.h>
113#include <VBox/param.h>
114#include <VBox/dis.h>
115#include <VBox/disopcode.h>
116#include <iprt/assert.h>
117#include <iprt/string.h>
118#include <iprt/x86.h>
119
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/** @typedef PFNIEMOP
126 * Pointer to an opcode decoder function.
127 */
128
129/** @def FNIEMOP_DEF
130 * Define an opcode decoder function.
131 *
132 * We're using macros for this so that adding and removing parameters as well as
133 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
134 *
135 * @param a_Name The function name.
136 */
137
138
139#if defined(__GNUC__) && defined(RT_ARCH_X86)
140typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
141# define FNIEMOP_DEF(a_Name) \
142 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
143# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
145# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
147
148#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
149typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
150# define FNIEMOP_DEF(a_Name) \
151 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
152# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
156
157#elif defined(__GNUC__)
158typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
159# define FNIEMOP_DEF(a_Name) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
161# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
163# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
165
166#else
167typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#endif
176
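/*
 * Illustrative sketch (not a definition from this file; the handler and worker
 * names are hypothetical) of how the FNIEMOP_* machinery above is meant to be
 * used together with the FNIEMOP_CALL_* and IEM_OPCODE_GET_NEXT_* macros that
 * follow further down:
 *
 *      FNIEMOP_DEF(iemOp_ExampleOpcode)
 *      {
 *          uint8_t bRm;
 *          IEM_OPCODE_GET_NEXT_U8(&bRm);                   // fetch the ModR/M byte
 *          return FNIEMOP_CALL_1(iemOpExampleWorker, bRm); // worker takes one extra argument
 *      }
 *
 * The calling convention and nothrow attributes stay centralized in the macro
 * definitions, so individual opcode handlers never have to repeat them.
 */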
177
178/**
179 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
180 */
181typedef union IEMSELDESC
182{
183 /** The legacy view. */
184 X86DESC Legacy;
185 /** The long mode view. */
186 X86DESC64 Long;
187} IEMSELDESC;
188/** Pointer to a selector descriptor table entry. */
189typedef IEMSELDESC *PIEMSELDESC;
190
191
192/*********************************************************************************************************************************
193* Defined Constants And Macros *
194*********************************************************************************************************************************/
195/** Temporary hack to disable the double execution. Will be removed in favor
196 * of a dedicated execution mode in EM. */
197//#define IEM_VERIFICATION_MODE_NO_REM
198
199/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
200 * due to GCC lacking knowledge about the value range of a switch. */
201#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
202
203/**
204 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
205 * occasion.
206 */
207#ifdef LOG_ENABLED
208# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
209 do { \
210 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
212 } while (0)
213#else
214# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
215 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
216#endif
217
218/**
219 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
220 * occasion using the supplied logger statement.
221 *
222 * @param a_LoggerArgs What to log on failure.
223 */
224#ifdef LOG_ENABLED
225# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
226 do { \
227 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
228 /*LogFunc(a_LoggerArgs);*/ \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
230 } while (0)
231#else
232# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
233 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
234#endif
235
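/*
 * Example use of the helpers above (illustrative only; the condition and
 * message are made up):
 *
 *      if (fSomeUnhandledCornerCase)
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("corner case %#x not handled\n", uValue));
 *
 * In builds without LOG_ENABLED both macros collapse to a plain return of
 * VERR_IEM_ASPECT_NOT_IMPLEMENTED.
 */
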
236/**
237 * Call an opcode decoder function.
238 *
239 * We're using macros for this so that adding and removing parameters can be
240 * done as we please. See FNIEMOP_DEF.
241 */
242#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
243
244/**
245 * Call a common opcode decoder function taking one extra argument.
246 *
247 * We're using macros for this so that adding and removing parameters can be
248 * done as we please. See FNIEMOP_DEF_1.
249 */
250#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
251
252/**
253 * Call a common opcode decoder function taking two extra arguments.
254 *
255 * We're using macros for this so that adding and removing parameters can be
256 * done as we please. See FNIEMOP_DEF_2.
257 */
258#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
259
260/**
261 * Check if we're currently executing in real or virtual 8086 mode.
262 *
263 * @returns @c true if it is, @c false if not.
264 * @param a_pIemCpu The IEM state of the current CPU.
265 */
266#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
267
268/**
269 * Check if we're currently executing in virtual 8086 mode.
270 *
271 * @returns @c true if it is, @c false if not.
272 * @param a_pIemCpu The IEM state of the current CPU.
273 */
274#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
275
276/**
277 * Check if we're currently executing in long mode.
278 *
279 * @returns @c true if it is, @c false if not.
280 * @param a_pIemCpu The IEM state of the current CPU.
281 */
282#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
283
284/**
285 * Check if we're currently executing in real mode.
286 *
287 * @returns @c true if it is, @c false if not.
288 * @param a_pIemCpu The IEM state of the current CPU.
289 */
290#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
291
292/**
293 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
294 * @returns PCCPUMFEATURES
295 * @param a_pIemCpu The IEM state of the current CPU.
296 */
297#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
298
299/**
300 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
301 * @returns PCCPUMFEATURES
302 * @param a_pIemCpu The IEM state of the current CPU.
303 */
304#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
305
306/**
307 * Evaluates to true if we're presenting an Intel CPU to the guest.
308 */
309#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
310
311/**
312 * Evaluates to true if we're presenting an AMD CPU to the guest.
313 */
314#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
315
316/**
317 * Check if the address is canonical.
318 */
319#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
320
321
322/*********************************************************************************************************************************
323* Global Variables *
324*********************************************************************************************************************************/
325extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
326
327
328/** Function table for the ADD instruction. */
329IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
330{
331 iemAImpl_add_u8, iemAImpl_add_u8_locked,
332 iemAImpl_add_u16, iemAImpl_add_u16_locked,
333 iemAImpl_add_u32, iemAImpl_add_u32_locked,
334 iemAImpl_add_u64, iemAImpl_add_u64_locked
335};
336
337/** Function table for the ADC instruction. */
338IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
339{
340 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
341 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
342 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
343 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
344};
345
346/** Function table for the SUB instruction. */
347IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
348{
349 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
350 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
351 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
352 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
353};
354
355/** Function table for the SBB instruction. */
356IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
357{
358 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
359 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
360 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
361 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
362};
363
364/** Function table for the OR instruction. */
365IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
366{
367 iemAImpl_or_u8, iemAImpl_or_u8_locked,
368 iemAImpl_or_u16, iemAImpl_or_u16_locked,
369 iemAImpl_or_u32, iemAImpl_or_u32_locked,
370 iemAImpl_or_u64, iemAImpl_or_u64_locked
371};
372
373/** Function table for the XOR instruction. */
374IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
375{
376 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
377 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
378 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
379 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
380};
381
382/** Function table for the AND instruction. */
383IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
384{
385 iemAImpl_and_u8, iemAImpl_and_u8_locked,
386 iemAImpl_and_u16, iemAImpl_and_u16_locked,
387 iemAImpl_and_u32, iemAImpl_and_u32_locked,
388 iemAImpl_and_u64, iemAImpl_and_u64_locked
389};
390
391/** Function table for the CMP instruction.
392 * @remarks Making operand order ASSUMPTIONS.
393 */
394IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
395{
396 iemAImpl_cmp_u8, NULL,
397 iemAImpl_cmp_u16, NULL,
398 iemAImpl_cmp_u32, NULL,
399 iemAImpl_cmp_u64, NULL
400};
401
402/** Function table for the TEST instruction.
403 * @remarks Making operand order ASSUMPTIONS.
404 */
405IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
406{
407 iemAImpl_test_u8, NULL,
408 iemAImpl_test_u16, NULL,
409 iemAImpl_test_u32, NULL,
410 iemAImpl_test_u64, NULL
411};
412
413/** Function table for the BT instruction. */
414IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
415{
416 NULL, NULL,
417 iemAImpl_bt_u16, NULL,
418 iemAImpl_bt_u32, NULL,
419 iemAImpl_bt_u64, NULL
420};
421
422/** Function table for the BTC instruction. */
423IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
424{
425 NULL, NULL,
426 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
427 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
428 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
429};
430
431/** Function table for the BTR instruction. */
432IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
433{
434 NULL, NULL,
435 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
436 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
437 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
438};
439
440/** Function table for the BTS instruction. */
441IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
442{
443 NULL, NULL,
444 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
445 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
446 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
447};
448
449/** Function table for the BSF instruction. */
450IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
451{
452 NULL, NULL,
453 iemAImpl_bsf_u16, NULL,
454 iemAImpl_bsf_u32, NULL,
455 iemAImpl_bsf_u64, NULL
456};
457
458/** Function table for the BSR instruction. */
459IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
460{
461 NULL, NULL,
462 iemAImpl_bsr_u16, NULL,
463 iemAImpl_bsr_u32, NULL,
464 iemAImpl_bsr_u64, NULL
465};
466
467/** Function table for the IMUL instruction. */
468IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
469{
470 NULL, NULL,
471 iemAImpl_imul_two_u16, NULL,
472 iemAImpl_imul_two_u32, NULL,
473 iemAImpl_imul_two_u64, NULL
474};
475
476/** Group 1 /r lookup table. */
477IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
478{
479 &g_iemAImpl_add,
480 &g_iemAImpl_or,
481 &g_iemAImpl_adc,
482 &g_iemAImpl_sbb,
483 &g_iemAImpl_and,
484 &g_iemAImpl_sub,
485 &g_iemAImpl_xor,
486 &g_iemAImpl_cmp
487};
488
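/*
 * Illustrative sketch (not code from this file) of how the group 1 table above
 * is meant to be indexed: the ModR/M reg field of a 0x80..0x83 style opcode
 * selects one of the eight ADD..CMP entries, e.g.:
 *
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 *
 * The normal or lock-prefixed worker for the effective operand size is then
 * picked from the selected IEMOPBINSIZES entry (see IEMInternal.h for its
 * exact layout).
 */
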
489/** Function table for the INC instruction. */
490IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
491{
492 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
493 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
494 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
495 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
496};
497
498/** Function table for the DEC instruction. */
499IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
500{
501 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
502 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
503 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
504 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
505};
506
507/** Function table for the NEG instruction. */
508IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
509{
510 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
511 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
512 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
513 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
514};
515
516/** Function table for the NOT instruction. */
517IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
518{
519 iemAImpl_not_u8, iemAImpl_not_u8_locked,
520 iemAImpl_not_u16, iemAImpl_not_u16_locked,
521 iemAImpl_not_u32, iemAImpl_not_u32_locked,
522 iemAImpl_not_u64, iemAImpl_not_u64_locked
523};
524
525
526/** Function table for the ROL instruction. */
527IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
528{
529 iemAImpl_rol_u8,
530 iemAImpl_rol_u16,
531 iemAImpl_rol_u32,
532 iemAImpl_rol_u64
533};
534
535/** Function table for the ROR instruction. */
536IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
537{
538 iemAImpl_ror_u8,
539 iemAImpl_ror_u16,
540 iemAImpl_ror_u32,
541 iemAImpl_ror_u64
542};
543
544/** Function table for the RCL instruction. */
545IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
546{
547 iemAImpl_rcl_u8,
548 iemAImpl_rcl_u16,
549 iemAImpl_rcl_u32,
550 iemAImpl_rcl_u64
551};
552
553/** Function table for the RCR instruction. */
554IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
555{
556 iemAImpl_rcr_u8,
557 iemAImpl_rcr_u16,
558 iemAImpl_rcr_u32,
559 iemAImpl_rcr_u64
560};
561
562/** Function table for the SHL instruction. */
563IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
564{
565 iemAImpl_shl_u8,
566 iemAImpl_shl_u16,
567 iemAImpl_shl_u32,
568 iemAImpl_shl_u64
569};
570
571/** Function table for the SHR instruction. */
572IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
573{
574 iemAImpl_shr_u8,
575 iemAImpl_shr_u16,
576 iemAImpl_shr_u32,
577 iemAImpl_shr_u64
578};
579
580/** Function table for the SAR instruction. */
581IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
582{
583 iemAImpl_sar_u8,
584 iemAImpl_sar_u16,
585 iemAImpl_sar_u32,
586 iemAImpl_sar_u64
587};
588
589
590/** Function table for the MUL instruction. */
591IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
592{
593 iemAImpl_mul_u8,
594 iemAImpl_mul_u16,
595 iemAImpl_mul_u32,
596 iemAImpl_mul_u64
597};
598
599/** Function table for the IMUL instruction working implicitly on rAX. */
600IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
601{
602 iemAImpl_imul_u8,
603 iemAImpl_imul_u16,
604 iemAImpl_imul_u32,
605 iemAImpl_imul_u64
606};
607
608/** Function table for the DIV instruction. */
609IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
610{
611 iemAImpl_div_u8,
612 iemAImpl_div_u16,
613 iemAImpl_div_u32,
614 iemAImpl_div_u64
615};
616
617/** Function table for the IDIV instruction. */
618IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
619{
620 iemAImpl_idiv_u8,
621 iemAImpl_idiv_u16,
622 iemAImpl_idiv_u32,
623 iemAImpl_idiv_u64
624};
625
626/** Function table for the SHLD instruction */
627IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
628{
629 iemAImpl_shld_u16,
630 iemAImpl_shld_u32,
631 iemAImpl_shld_u64,
632};
633
634/** Function table for the SHRD instruction */
635IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
636{
637 iemAImpl_shrd_u16,
638 iemAImpl_shrd_u32,
639 iemAImpl_shrd_u64,
640};
641
642
643/** Function table for the PUNPCKLBW instruction */
644IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
645/** Function table for the PUNPCKLWD instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
647/** Function table for the PUNPCKLDQ instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
649/** Function table for the PUNPCKLQDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
651
652/** Function table for the PUNPCKHBW instruction */
653IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
654/** Function table for the PUNPCKHWD instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
656/** Function table for the PUNPCKHDQ instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
658/** Function table for the PUNPCKHQDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
660
661/** Function table for the PXOR instruction */
662IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
663/** Function table for the PCMPEQB instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
665/** Function table for the PCMPEQW instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
667/** Function table for the PCMPEQD instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
669
670
671#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
672/** What IEM just wrote. */
673uint8_t g_abIemWrote[256];
674/** How much IEM just wrote. */
675size_t g_cbIemWrote;
676#endif
677
678
679/*********************************************************************************************************************************
680* Internal Functions *
681*********************************************************************************************************************************/
682IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
683IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
687IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
689IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
692IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
694IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
695IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
698IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
699IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
700IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
701IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
709IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
710IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
713IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
714IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
715
716#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
717IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
718#endif
719IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
720IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
721
722
723
724/**
725 * Sets the pass up status.
726 *
727 * @returns VINF_SUCCESS.
728 * @param pIemCpu The per CPU IEM state of the calling thread.
729 * @param rcPassUp The pass up status. Must be informational.
730 * VINF_SUCCESS is not allowed.
731 */
732IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
733{
734 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
735
736 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
737 if (rcOldPassUp == VINF_SUCCESS)
738 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
739 /* If both are EM scheduling codes, use EM priority rules. */
740 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
741 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
742 {
743 if (rcPassUp < rcOldPassUp)
744 {
745 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
747 }
748 else
749 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
750 }
751 /* Override EM scheduling with specific status code. */
752 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
753 {
754 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
755 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
756 }
757 /* Don't override specific status code, first come first served. */
758 else
759 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
760 return VINF_SUCCESS;
761}
762
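/*
 * Typical use (see e.g. the opcode prefetching code below): an informational
 * status from a physical memory access is recorded for the caller and the
 * current operation carries on with VINF_SUCCESS:
 *
 *      rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
 */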
763
764/**
765 * Calculates the CPU mode.
766 *
767 * This is mainly for updating IEMCPU::enmCpuMode.
768 *
769 * @returns CPU mode.
770 * @param pCtx The register context for the CPU.
771 */
772DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
773{
774 if (CPUMIsGuestIn64BitCodeEx(pCtx))
775 return IEMMODE_64BIT;
776 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
777 return IEMMODE_32BIT;
778 return IEMMODE_16BIT;
779}
780
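/*
 * Example: a guest that is in long mode but currently executing a
 * compatibility-mode code segment (CS.L=0, CS.D=1) is not "in 64-bit code",
 * so iemCalcCpuMode returns IEMMODE_32BIT; with CS.D=0 it would return
 * IEMMODE_16BIT.
 */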
781
782/**
783 * Initializes the execution state.
784 *
785 * @param pIemCpu The per CPU IEM state.
786 * @param fBypassHandlers Whether to bypass access handlers.
787 *
788 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
789 * side-effects in strict builds.
790 */
791DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
792{
793 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
794 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
795
796 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
797 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
798
799#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
800 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
801 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
808#endif
809
810#ifdef VBOX_WITH_RAW_MODE_NOT_R0
811 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
812#endif
813 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
814 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
815#ifdef VBOX_STRICT
816 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
817 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
818 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
819 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
820 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
821 pIemCpu->uRexReg = 127;
822 pIemCpu->uRexB = 127;
823 pIemCpu->uRexIndex = 127;
824 pIemCpu->iEffSeg = 127;
825 pIemCpu->offOpcode = 127;
826 pIemCpu->cbOpcode = 127;
827#endif
828
829 pIemCpu->cActiveMappings = 0;
830 pIemCpu->iNextMapping = 0;
831 pIemCpu->rcPassUp = VINF_SUCCESS;
832 pIemCpu->fBypassHandlers = fBypassHandlers;
833#ifdef VBOX_WITH_RAW_MODE_NOT_R0
834 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
835 && pCtx->cs.u64Base == 0
836 && pCtx->cs.u32Limit == UINT32_MAX
837 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
838 if (!pIemCpu->fInPatchCode)
839 CPUMRawLeave(pVCpu, VINF_SUCCESS);
840#endif
841
842#ifdef IEM_VERIFICATION_MODE_FULL
843 pIemCpu->fNoRemSavedByExec = pIemCpu->fNoRem;
844 pIemCpu->fNoRem = true;
845#endif
846}
847
848
849/**
850 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
851 *
852 * @param pIemCpu The per CPU IEM state.
853 */
854DECLINLINE(void) iemUninitExec(PIEMCPU pIemCpu)
855{
856#ifdef IEM_VERIFICATION_MODE_FULL
857 pIemCpu->fNoRem = pIemCpu->fNoRemSavedByExec;
858#endif
859#ifdef VBOX_STRICT
860 pIemCpu->cbOpcode = 0;
861#else
862 NOREF(pIemCpu);
863#endif
864}
865
866
867/**
868 * Initializes the decoder state.
869 *
870 * @param pIemCpu The per CPU IEM state.
871 * @param fBypassHandlers Whether to bypass access handlers.
872 */
873DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
874{
875 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
876 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
877
878 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
879 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
880
881#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
882 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
883 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
885 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
886 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
887 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
888 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
889 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
890#endif
891
892#ifdef VBOX_WITH_RAW_MODE_NOT_R0
893 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
894#endif
895 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
896#ifdef IEM_VERIFICATION_MODE_FULL
897 if (pIemCpu->uInjectCpl != UINT8_MAX)
898 pIemCpu->uCpl = pIemCpu->uInjectCpl;
899#endif
900 IEMMODE enmMode = iemCalcCpuMode(pCtx);
901 pIemCpu->enmCpuMode = enmMode;
902 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
903 pIemCpu->enmEffAddrMode = enmMode;
904 if (enmMode != IEMMODE_64BIT)
905 {
906 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
907 pIemCpu->enmEffOpSize = enmMode;
908 }
909 else
910 {
911 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
912 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
913 }
914 pIemCpu->fPrefixes = 0;
915 pIemCpu->uRexReg = 0;
916 pIemCpu->uRexB = 0;
917 pIemCpu->uRexIndex = 0;
918 pIemCpu->iEffSeg = X86_SREG_DS;
919 pIemCpu->offOpcode = 0;
920 pIemCpu->cbOpcode = 0;
921 pIemCpu->cActiveMappings = 0;
922 pIemCpu->iNextMapping = 0;
923 pIemCpu->rcPassUp = VINF_SUCCESS;
924 pIemCpu->fBypassHandlers = fBypassHandlers;
925#ifdef VBOX_WITH_RAW_MODE_NOT_R0
926 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
927 && pCtx->cs.u64Base == 0
928 && pCtx->cs.u32Limit == UINT32_MAX
929 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
930 if (!pIemCpu->fInPatchCode)
931 CPUMRawLeave(pVCpu, VINF_SUCCESS);
932#endif
933
934#ifdef DBGFTRACE_ENABLED
935 switch (enmMode)
936 {
937 case IEMMODE_64BIT:
938 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
939 break;
940 case IEMMODE_32BIT:
941 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
942 break;
943 case IEMMODE_16BIT:
944 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
945 break;
946 }
947#endif
948}
949
950
951/**
952 * Prefetches opcodes when starting execution for the first time.
953 *
954 * @returns Strict VBox status code.
955 * @param pIemCpu The IEM state.
956 * @param fBypassHandlers Whether to bypass access handlers.
957 */
958IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
959{
960#ifdef IEM_VERIFICATION_MODE_FULL
961 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
962#endif
963 iemInitDecoder(pIemCpu, fBypassHandlers);
964
965 /*
966 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
967 *
968 * First translate CS:rIP to a physical address.
969 */
970 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
971 uint32_t cbToTryRead;
972 RTGCPTR GCPtrPC;
973 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
974 {
975 cbToTryRead = PAGE_SIZE;
976 GCPtrPC = pCtx->rip;
977 if (!IEM_IS_CANONICAL(GCPtrPC))
978 return iemRaiseGeneralProtectionFault0(pIemCpu);
979 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
980 }
981 else
982 {
983 uint32_t GCPtrPC32 = pCtx->eip;
984 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
985 if (GCPtrPC32 > pCtx->cs.u32Limit)
986 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
987 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
988 if (!cbToTryRead) /* overflowed */
989 {
990 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
991 cbToTryRead = UINT32_MAX;
992 }
993 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
994 Assert(GCPtrPC <= UINT32_MAX);
995 }
996
997#ifdef VBOX_WITH_RAW_MODE_NOT_R0
998 /* Allow interpretation of patch manager code blocks since they can for
999 instance throw #PFs for perfectly good reasons. */
1000 if (pIemCpu->fInPatchCode)
1001 {
1002 size_t cbRead = 0;
1003 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
1004 AssertRCReturn(rc, rc);
1005 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1006 return VINF_SUCCESS;
1007 }
1008#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1009
1010 RTGCPHYS GCPhys;
1011 uint64_t fFlags;
1012 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
1013 if (RT_FAILURE(rc))
1014 {
1015 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1016 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1017 }
1018 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1019 {
1020 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1021 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1022 }
1023 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1024 {
1025 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1026 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1027 }
1028 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1029 /** @todo Check reserved bits and such stuff. PGM is better at doing
1030 * that, so do it when implementing the guest virtual address
1031 * TLB... */
1032
1033#ifdef IEM_VERIFICATION_MODE_FULL
1034 /*
1035 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1036 * instruction.
1037 */
1038 /** @todo optimize this differently by not using PGMPhysRead. */
1039 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1040 pIemCpu->GCPhysOpcodes = GCPhys;
1041 if ( offPrevOpcodes < cbOldOpcodes
1042 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1043 {
1044 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1045 Assert(cbNew <= RT_ELEMENTS(pIemCpu->abOpcode));
1046 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1047 pIemCpu->cbOpcode = cbNew;
1048 return VINF_SUCCESS;
1049 }
1050#endif
1051
1052 /*
1053 * Read the bytes at this address.
1054 */
1055 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1056#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1057 size_t cbActual;
1058 if ( PATMIsEnabled(pVM)
1059 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1060 {
1061 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1062 Assert(cbActual > 0);
1063 pIemCpu->cbOpcode = (uint8_t)cbActual;
1064 }
1065 else
1066#endif
1067 {
1068 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1069 if (cbToTryRead > cbLeftOnPage)
1070 cbToTryRead = cbLeftOnPage;
1071 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1072 cbToTryRead = sizeof(pIemCpu->abOpcode);
1073
1074 if (!pIemCpu->fBypassHandlers)
1075 {
1076 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1077 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1078 { /* likely */ }
1079 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1080 {
1081 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1082 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1083 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1084 }
1085 else
1086 {
1087 Log((RT_SUCCESS(rcStrict)
1088 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1089 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1090 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1091 return rcStrict;
1092 }
1093 }
1094 else
1095 {
1096 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1097 if (RT_SUCCESS(rc))
1098 { /* likely */ }
1099 else
1100 {
1101 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1102 GCPtrPC, GCPhys, cbToTryRead, rc));
1103 return rc;
1104 }
1105 }
1106 pIemCpu->cbOpcode = cbToTryRead;
1107 }
1108
1109 return VINF_SUCCESS;
1110}
1111
1112
1113/**
1114 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1115 * exception if it fails.
1116 *
1117 * @returns Strict VBox status code.
1118 * @param pIemCpu The IEM state.
1119 * @param cbMin The minimum number of bytes relative to offOpcode
1120 * that must be read.
1121 */
1122IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1123{
1124 /*
1125 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1126 *
1127 * First translate CS:rIP to a physical address.
1128 */
1129 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1130 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1131 uint32_t cbToTryRead;
1132 RTGCPTR GCPtrNext;
1133 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1134 {
1135 cbToTryRead = PAGE_SIZE;
1136 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1137 if (!IEM_IS_CANONICAL(GCPtrNext))
1138 return iemRaiseGeneralProtectionFault0(pIemCpu);
1139 }
1140 else
1141 {
1142 uint32_t GCPtrNext32 = pCtx->eip;
1143 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1144 GCPtrNext32 += pIemCpu->cbOpcode;
1145 if (GCPtrNext32 > pCtx->cs.u32Limit)
1146 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1147 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1148 if (!cbToTryRead) /* overflowed */
1149 {
1150 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1151 cbToTryRead = UINT32_MAX;
1152 /** @todo check out wrapping around the code segment. */
1153 }
1154 if (cbToTryRead < cbMin - cbLeft)
1155 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1156 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1157 }
1158
1159 /* Only read up to the end of the page, and make sure we don't read more
1160 than the opcode buffer can hold. */
1161 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1162 if (cbToTryRead > cbLeftOnPage)
1163 cbToTryRead = cbLeftOnPage;
1164 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1165 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1166/** @todo r=bird: Convert assertion into undefined opcode exception? */
1167 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1168
1169#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1170 /* Allow interpretation of patch manager code blocks since they can for
1171 instance throw #PFs for perfectly good reasons. */
1172 if (pIemCpu->fInPatchCode)
1173 {
1174 size_t cbRead = 0;
1175 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1176 AssertRCReturn(rc, rc);
1177 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1178 return VINF_SUCCESS;
1179 }
1180#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1181
1182 RTGCPHYS GCPhys;
1183 uint64_t fFlags;
1184 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1185 if (RT_FAILURE(rc))
1186 {
1187 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1188 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1189 }
1190 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1191 {
1192 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1193 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1194 }
1195 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1196 {
1197 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1198 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1199 }
1200 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1201 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1202 /** @todo Check reserved bits and such stuff. PGM is better at doing
1203 * that, so do it when implementing the guest virtual address
1204 * TLB... */
1205
1206 /*
1207 * Read the bytes at this address.
1208 *
1209 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1210 * and since PATM should only patch the start of an instruction there
1211 * should be no need to check again here.
1212 */
1213 if (!pIemCpu->fBypassHandlers)
1214 {
1215 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1216 cbToTryRead, PGMACCESSORIGIN_IEM);
1217 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1218 { /* likely */ }
1219 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1220 {
1221 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1222 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1223 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1224 }
1225 else
1226 {
1227 Log((RT_SUCCESS(rcStrict)
1228 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1229 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1230 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1231 return rcStrict;
1232 }
1233 }
1234 else
1235 {
1236 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1237 if (RT_SUCCESS(rc))
1238 { /* likely */ }
1239 else
1240 {
1241 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1242 return rc;
1243 }
1244 }
1245 pIemCpu->cbOpcode += cbToTryRead;
1246 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1247
1248 return VINF_SUCCESS;
1249}
1250
1251
1252/**
1253 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1254 *
1255 * @returns Strict VBox status code.
1256 * @param pIemCpu The IEM state.
1257 * @param pb Where to return the opcode byte.
1258 */
1259DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1260{
1261 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1262 if (rcStrict == VINF_SUCCESS)
1263 {
1264 uint8_t offOpcode = pIemCpu->offOpcode;
1265 *pb = pIemCpu->abOpcode[offOpcode];
1266 pIemCpu->offOpcode = offOpcode + 1;
1267 }
1268 else
1269 *pb = 0;
1270 return rcStrict;
1271}
1272
1273
1274/**
1275 * Fetches the next opcode byte.
1276 *
1277 * @returns Strict VBox status code.
1278 * @param pIemCpu The IEM state.
1279 * @param pu8 Where to return the opcode byte.
1280 */
1281DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1282{
1283 uint8_t const offOpcode = pIemCpu->offOpcode;
1284 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1285 {
1286 *pu8 = pIemCpu->abOpcode[offOpcode];
1287 pIemCpu->offOpcode = offOpcode + 1;
1288 return VINF_SUCCESS;
1289 }
1290 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1291}
1292
1293
1294/**
1295 * Fetches the next opcode byte, returns automatically on failure.
1296 *
1297 * @param a_pu8 Where to return the opcode byte.
1298 * @remark Implicitly references pIemCpu.
1299 */
1300#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1301 do \
1302 { \
1303 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1304 if (rcStrict2 != VINF_SUCCESS) \
1305 return rcStrict2; \
1306 } while (0)
1307
1308
1309/**
1310 * Fetches the next signed byte from the opcode stream.
1311 *
1312 * @returns Strict VBox status code.
1313 * @param pIemCpu The IEM state.
1314 * @param pi8 Where to return the signed byte.
1315 */
1316DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1317{
1318 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1319}
1320
1321
1322/**
1323 * Fetches the next signed byte from the opcode stream, returning automatically
1324 * on failure.
1325 *
1326 * @param a_pi8 Where to return the signed byte.
1327 * @remark Implicitly references pIemCpu.
1328 */
1329#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1330 do \
1331 { \
1332 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1333 if (rcStrict2 != VINF_SUCCESS) \
1334 return rcStrict2; \
1335 } while (0)
1336
1337
1338/**
1339 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1340 *
1341 * @returns Strict VBox status code.
1342 * @param pIemCpu The IEM state.
1343 * @param pu16 Where to return the opcode word.
1344 */
1345DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1346{
1347 uint8_t u8;
1348 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1349 if (rcStrict == VINF_SUCCESS)
1350 *pu16 = (int8_t)u8;
1351 return rcStrict;
1352}
1353
1354
1355/**
1356 * Fetches the next signed byte from the opcode stream, extending it to
1357 * unsigned 16-bit.
1358 *
1359 * @returns Strict VBox status code.
1360 * @param pIemCpu The IEM state.
1361 * @param pu16 Where to return the unsigned word.
1362 */
1363DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1364{
1365 uint8_t const offOpcode = pIemCpu->offOpcode;
1366 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1367 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1368
1369 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1370 pIemCpu->offOpcode = offOpcode + 1;
1371 return VINF_SUCCESS;
1372}
1373
1374
1375/**
1376 * Fetches the next signed byte from the opcode stream and sign-extends it to
1377 * a word, returning automatically on failure.
1378 *
1379 * @param a_pu16 Where to return the word.
1380 * @remark Implicitly references pIemCpu.
1381 */
1382#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1383 do \
1384 { \
1385 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1386 if (rcStrict2 != VINF_SUCCESS) \
1387 return rcStrict2; \
1388 } while (0)
1389
1390
1391/**
1392 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1393 *
1394 * @returns Strict VBox status code.
1395 * @param pIemCpu The IEM state.
1396 * @param pu32 Where to return the opcode dword.
1397 */
1398DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1399{
1400 uint8_t u8;
1401 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1402 if (rcStrict == VINF_SUCCESS)
1403 *pu32 = (int8_t)u8;
1404 return rcStrict;
1405}
1406
1407
1408/**
1409 * Fetches the next signed byte from the opcode stream, extending it to
1410 * unsigned 32-bit.
1411 *
1412 * @returns Strict VBox status code.
1413 * @param pIemCpu The IEM state.
1414 * @param pu32 Where to return the unsigned dword.
1415 */
1416DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1417{
1418 uint8_t const offOpcode = pIemCpu->offOpcode;
1419 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1420 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1421
1422 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1423 pIemCpu->offOpcode = offOpcode + 1;
1424 return VINF_SUCCESS;
1425}
1426
1427
1428/**
1429 * Fetches the next signed byte from the opcode stream and sign-extends it to
1430 * a double word, returning automatically on failure.
1431 *
1432 * @param a_pu32 Where to return the double word.
1433 * @remark Implicitly references pIemCpu.
1434 */
1435#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1436 do \
1437 { \
1438 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1439 if (rcStrict2 != VINF_SUCCESS) \
1440 return rcStrict2; \
1441 } while (0)
1442
1443
1444/**
1445 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1446 *
1447 * @returns Strict VBox status code.
1448 * @param pIemCpu The IEM state.
1449 * @param pu64 Where to return the opcode qword.
1450 */
1451DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1452{
1453 uint8_t u8;
1454 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1455 if (rcStrict == VINF_SUCCESS)
1456 *pu64 = (int8_t)u8;
1457 return rcStrict;
1458}
1459
1460
1461/**
1462 * Fetches the next signed byte from the opcode stream, extending it to
1463 * unsigned 64-bit.
1464 *
1465 * @returns Strict VBox status code.
1466 * @param pIemCpu The IEM state.
1467 * @param pu64 Where to return the unsigned qword.
1468 */
1469DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1470{
1471 uint8_t const offOpcode = pIemCpu->offOpcode;
1472 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1473 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1474
1475 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1476 pIemCpu->offOpcode = offOpcode + 1;
1477 return VINF_SUCCESS;
1478}
1479
1480
1481/**
1482 * Fetches the next signed byte from the opcode stream and sign-extends it to
1483 * a quad word, returning automatically on failure.
1484 *
1485 * @param a_pu64 Where to return the quad word.
1486 * @remark Implicitly references pIemCpu.
1487 */
1488#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1489 do \
1490 { \
1491 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1492 if (rcStrict2 != VINF_SUCCESS) \
1493 return rcStrict2; \
1494 } while (0)
1495
1496
1497/**
1498 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1499 *
1500 * @returns Strict VBox status code.
1501 * @param pIemCpu The IEM state.
1502 * @param pu16 Where to return the opcode word.
1503 */
1504DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1505{
1506 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1507 if (rcStrict == VINF_SUCCESS)
1508 {
1509 uint8_t offOpcode = pIemCpu->offOpcode;
1510 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1511 pIemCpu->offOpcode = offOpcode + 2;
1512 }
1513 else
1514 *pu16 = 0;
1515 return rcStrict;
1516}
1517
1518
1519/**
1520 * Fetches the next opcode word.
1521 *
1522 * @returns Strict VBox status code.
1523 * @param pIemCpu The IEM state.
1524 * @param pu16 Where to return the opcode word.
1525 */
1526DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1527{
1528 uint8_t const offOpcode = pIemCpu->offOpcode;
1529 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1530 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1531
1532 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1533 pIemCpu->offOpcode = offOpcode + 2;
1534 return VINF_SUCCESS;
1535}
1536
1537
1538/**
1539 * Fetches the next opcode word, returns automatically on failure.
1540 *
1541 * @param a_pu16 Where to return the opcode word.
1542 * @remark Implicitly references pIemCpu.
1543 */
1544#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1545 do \
1546 { \
1547 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1548 if (rcStrict2 != VINF_SUCCESS) \
1549 return rcStrict2; \
1550 } while (0)
1551
1552
1553/**
1554 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1555 *
1556 * @returns Strict VBox status code.
1557 * @param pIemCpu The IEM state.
1558 * @param pu32 Where to return the opcode double word.
1559 */
1560DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1561{
1562 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1563 if (rcStrict == VINF_SUCCESS)
1564 {
1565 uint8_t offOpcode = pIemCpu->offOpcode;
1566 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1567 pIemCpu->offOpcode = offOpcode + 2;
1568 }
1569 else
1570 *pu32 = 0;
1571 return rcStrict;
1572}
1573
1574
1575/**
1576 * Fetches the next opcode word, zero extending it to a double word.
1577 *
1578 * @returns Strict VBox status code.
1579 * @param pIemCpu The IEM state.
1580 * @param pu32 Where to return the opcode double word.
1581 */
1582DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1583{
1584 uint8_t const offOpcode = pIemCpu->offOpcode;
1585 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1586 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1587
1588 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1589 pIemCpu->offOpcode = offOpcode + 2;
1590 return VINF_SUCCESS;
1591}
1592
1593
1594/**
1595 * Fetches the next opcode word and zero extends it to a double word, returns
1596 * automatically on failure.
1597 *
1598 * @param a_pu32 Where to return the opcode double word.
1599 * @remark Implicitly references pIemCpu.
1600 */
1601#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1602 do \
1603 { \
1604 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1605 if (rcStrict2 != VINF_SUCCESS) \
1606 return rcStrict2; \
1607 } while (0)
1608
1609
1610/**
1611 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1612 *
1613 * @returns Strict VBox status code.
1614 * @param pIemCpu The IEM state.
1615 * @param pu64 Where to return the opcode quad word.
1616 */
1617DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1618{
1619 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1620 if (rcStrict == VINF_SUCCESS)
1621 {
1622 uint8_t offOpcode = pIemCpu->offOpcode;
1623 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1624 pIemCpu->offOpcode = offOpcode + 2;
1625 }
1626 else
1627 *pu64 = 0;
1628 return rcStrict;
1629}
1630
1631
1632/**
1633 * Fetches the next opcode word, zero extending it to a quad word.
1634 *
1635 * @returns Strict VBox status code.
1636 * @param pIemCpu The IEM state.
1637 * @param pu64 Where to return the opcode quad word.
1638 */
1639DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1640{
1641 uint8_t const offOpcode = pIemCpu->offOpcode;
1642 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1643 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1644
1645 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1646 pIemCpu->offOpcode = offOpcode + 2;
1647 return VINF_SUCCESS;
1648}
1649
1650
1651/**
1652 * Fetches the next opcode word and zero extends it to a quad word, returns
1653 * automatically on failure.
1654 *
1655 * @param a_pu64 Where to return the opcode quad word.
1656 * @remark Implicitly references pIemCpu.
1657 */
1658#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1659 do \
1660 { \
1661 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1662 if (rcStrict2 != VINF_SUCCESS) \
1663 return rcStrict2; \
1664 } while (0)
1665
1666
1667/**
1668 * Fetches the next signed word from the opcode stream.
1669 *
1670 * @returns Strict VBox status code.
1671 * @param pIemCpu The IEM state.
1672 * @param pi16 Where to return the signed word.
1673 */
1674DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1675{
1676 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1677}
1678
1679
1680/**
1681 * Fetches the next signed word from the opcode stream, returning automatically
1682 * on failure.
1683 *
1684 * @param a_pi16 Where to return the signed word.
1685 * @remark Implicitly references pIemCpu.
1686 */
1687#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1688 do \
1689 { \
1690 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1691 if (rcStrict2 != VINF_SUCCESS) \
1692 return rcStrict2; \
1693 } while (0)
1694
1695
1696/**
1697 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1698 *
1699 * @returns Strict VBox status code.
1700 * @param pIemCpu The IEM state.
1701 * @param pu32 Where to return the opcode dword.
1702 */
1703DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1704{
1705 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1706 if (rcStrict == VINF_SUCCESS)
1707 {
1708 uint8_t offOpcode = pIemCpu->offOpcode;
1709 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1710 pIemCpu->abOpcode[offOpcode + 1],
1711 pIemCpu->abOpcode[offOpcode + 2],
1712 pIemCpu->abOpcode[offOpcode + 3]);
1713 pIemCpu->offOpcode = offOpcode + 4;
1714 }
1715 else
1716 *pu32 = 0;
1717 return rcStrict;
1718}
1719
1720
1721/**
1722 * Fetches the next opcode dword.
1723 *
1724 * @returns Strict VBox status code.
1725 * @param pIemCpu The IEM state.
1726 * @param pu32 Where to return the opcode double word.
1727 */
1728DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1729{
1730 uint8_t const offOpcode = pIemCpu->offOpcode;
1731 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1732 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1733
1734 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1735 pIemCpu->abOpcode[offOpcode + 1],
1736 pIemCpu->abOpcode[offOpcode + 2],
1737 pIemCpu->abOpcode[offOpcode + 3]);
1738 pIemCpu->offOpcode = offOpcode + 4;
1739 return VINF_SUCCESS;
1740}
1741
1742
1743/**
1744 * Fetches the next opcode dword, returns automatically on failure.
1745 *
1746 * @param a_pu32 Where to return the opcode dword.
1747 * @remark Implicitly references pIemCpu.
1748 */
1749#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1750 do \
1751 { \
1752 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1753 if (rcStrict2 != VINF_SUCCESS) \
1754 return rcStrict2; \
1755 } while (0)
1756
1757
1758/**
1759 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1760 *
1761 * @returns Strict VBox status code.
1762 * @param pIemCpu The IEM state.
1763 * @param pu64 Where to return the opcode quad word.
1764 */
1765DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1766{
1767 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1768 if (rcStrict == VINF_SUCCESS)
1769 {
1770 uint8_t offOpcode = pIemCpu->offOpcode;
1771 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1772 pIemCpu->abOpcode[offOpcode + 1],
1773 pIemCpu->abOpcode[offOpcode + 2],
1774 pIemCpu->abOpcode[offOpcode + 3]);
1775 pIemCpu->offOpcode = offOpcode + 4;
1776 }
1777 else
1778 *pu64 = 0;
1779 return rcStrict;
1780}
1781
1782
1783/**
1784 * Fetches the next opcode dword, zero extending it to a quad word.
1785 *
1786 * @returns Strict VBox status code.
1787 * @param pIemCpu The IEM state.
1788 * @param pu64 Where to return the opcode quad word.
1789 */
1790DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1791{
1792 uint8_t const offOpcode = pIemCpu->offOpcode;
1793 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1794 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1795
1796 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1797 pIemCpu->abOpcode[offOpcode + 1],
1798 pIemCpu->abOpcode[offOpcode + 2],
1799 pIemCpu->abOpcode[offOpcode + 3]);
1800 pIemCpu->offOpcode = offOpcode + 4;
1801 return VINF_SUCCESS;
1802}
1803
1804
1805/**
1806 * Fetches the next opcode dword and zero extends it to a quad word, returns
1807 * automatically on failure.
1808 *
1809 * @param a_pu64 Where to return the opcode quad word.
1810 * @remark Implicitly references pIemCpu.
1811 */
1812#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1813 do \
1814 { \
1815 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1816 if (rcStrict2 != VINF_SUCCESS) \
1817 return rcStrict2; \
1818 } while (0)
1819
1820
1821/**
1822 * Fetches the next signed double word from the opcode stream.
1823 *
1824 * @returns Strict VBox status code.
1825 * @param pIemCpu The IEM state.
1826 * @param pi32 Where to return the signed double word.
1827 */
1828DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1829{
1830 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1831}
1832
1833/**
1834 * Fetches the next signed double word from the opcode stream, returning
1835 * automatically on failure.
1836 *
1837 * @param a_pi32 Where to return the signed double word.
1838 * @remark Implicitly references pIemCpu.
1839 */
1840#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1841 do \
1842 { \
1843 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1844 if (rcStrict2 != VINF_SUCCESS) \
1845 return rcStrict2; \
1846 } while (0)
1847
1848
1849/**
1850 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1851 *
1852 * @returns Strict VBox status code.
1853 * @param pIemCpu The IEM state.
1854 * @param pu64 Where to return the opcode qword.
1855 */
1856DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1857{
1858 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1859 if (rcStrict == VINF_SUCCESS)
1860 {
1861 uint8_t offOpcode = pIemCpu->offOpcode;
1862 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1863 pIemCpu->abOpcode[offOpcode + 1],
1864 pIemCpu->abOpcode[offOpcode + 2],
1865 pIemCpu->abOpcode[offOpcode + 3]);
1866 pIemCpu->offOpcode = offOpcode + 4;
1867 }
1868 else
1869 *pu64 = 0;
1870 return rcStrict;
1871}
1872
1873
1874/**
1875 * Fetches the next opcode dword, sign extending it into a quad word.
1876 *
1877 * @returns Strict VBox status code.
1878 * @param pIemCpu The IEM state.
1879 * @param pu64 Where to return the opcode quad word.
1880 */
1881DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1882{
1883 uint8_t const offOpcode = pIemCpu->offOpcode;
1884 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1885 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1886
1887 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1888 pIemCpu->abOpcode[offOpcode + 1],
1889 pIemCpu->abOpcode[offOpcode + 2],
1890 pIemCpu->abOpcode[offOpcode + 3]);
1891 *pu64 = i32;
1892 pIemCpu->offOpcode = offOpcode + 4;
1893 return VINF_SUCCESS;
1894}
1895
1896
1897/**
1898 * Fetches the next opcode double word and sign extends it to a quad word,
1899 * returns automatically on failure.
1900 *
1901 * @param a_pu64 Where to return the opcode quad word.
1902 * @remark Implicitly references pIemCpu.
1903 */
1904#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1905 do \
1906 { \
1907 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1908 if (rcStrict2 != VINF_SUCCESS) \
1909 return rcStrict2; \
1910 } while (0)
1911
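/*
 * Note (general x86-64 background, not from the original sources): in 64-bit
 * mode most 32-bit immediates and displacements are sign-extended to 64 bits,
 * which is exactly what the helper above provides.  Hypothetical fragment:
 *
 *      uint64_t u64Imm;
 *      IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);    // e.g. 0xfffffffe -> 0xfffffffffffffffe
 */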
1912
1913/**
1914 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1915 *
1916 * @returns Strict VBox status code.
1917 * @param pIemCpu The IEM state.
1918 * @param pu64 Where to return the opcode qword.
1919 */
1920DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1921{
1922 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1923 if (rcStrict == VINF_SUCCESS)
1924 {
1925 uint8_t offOpcode = pIemCpu->offOpcode;
1926 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1927 pIemCpu->abOpcode[offOpcode + 1],
1928 pIemCpu->abOpcode[offOpcode + 2],
1929 pIemCpu->abOpcode[offOpcode + 3],
1930 pIemCpu->abOpcode[offOpcode + 4],
1931 pIemCpu->abOpcode[offOpcode + 5],
1932 pIemCpu->abOpcode[offOpcode + 6],
1933 pIemCpu->abOpcode[offOpcode + 7]);
1934 pIemCpu->offOpcode = offOpcode + 8;
1935 }
1936 else
1937 *pu64 = 0;
1938 return rcStrict;
1939}
1940
1941
1942/**
1943 * Fetches the next opcode qword.
1944 *
1945 * @returns Strict VBox status code.
1946 * @param pIemCpu The IEM state.
1947 * @param pu64 Where to return the opcode qword.
1948 */
1949DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1950{
1951 uint8_t const offOpcode = pIemCpu->offOpcode;
1952 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1953 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1954
1955 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1956 pIemCpu->abOpcode[offOpcode + 1],
1957 pIemCpu->abOpcode[offOpcode + 2],
1958 pIemCpu->abOpcode[offOpcode + 3],
1959 pIemCpu->abOpcode[offOpcode + 4],
1960 pIemCpu->abOpcode[offOpcode + 5],
1961 pIemCpu->abOpcode[offOpcode + 6],
1962 pIemCpu->abOpcode[offOpcode + 7]);
1963 pIemCpu->offOpcode = offOpcode + 8;
1964 return VINF_SUCCESS;
1965}
1966
1967
1968/**
1969 * Fetches the next opcode quad word, returns automatically on failure.
1970 *
1971 * @param a_pu64 Where to return the opcode quad word.
1972 * @remark Implicitly references pIemCpu.
1973 */
1974#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1975 do \
1976 { \
1977 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1978 if (rcStrict2 != VINF_SUCCESS) \
1979 return rcStrict2; \
1980 } while (0)
1981
1982
1983/** @name Misc Worker Functions.
1984 * @{
1985 */
1986
1987
1988/**
1989 * Validates a new SS segment.
1990 *
1991 * @returns VBox strict status code.
1992 * @param pIemCpu The IEM per CPU instance data.
1993 * @param pCtx The CPU context.
1994 * @param NewSS The new SS selector.
1995 * @param uCpl The CPL to load the stack for.
1996 * @param pDesc Where to return the descriptor.
1997 */
1998IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1999{
2000 NOREF(pCtx);
2001
2002 /* Null selectors are not allowed (we're not called for dispatching
2003 interrupts with SS=0 in long mode). */
2004 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2005 {
2006 Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
2007 return iemRaiseTaskSwitchFault0(pIemCpu);
2008 }
2009
2010 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2011 if ((NewSS & X86_SEL_RPL) != uCpl)
2012 {
2013 Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
2014 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2015 }
2016
2017 /*
2018 * Read the descriptor.
2019 */
2020 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
2021 if (rcStrict != VINF_SUCCESS)
2022 return rcStrict;
2023
2024 /*
2025 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2026 */
2027 if (!pDesc->Legacy.Gen.u1DescType)
2028 {
2029 Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2030 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2031 }
2032
2033 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2034 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2035 {
2036 Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2037 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2038 }
2039 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2040 {
2041 Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2042 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2043 }
2044
2045 /* Is it there? */
2046 /** @todo testcase: Is this checked before the canonical / limit check below? */
2047 if (!pDesc->Legacy.Gen.u1Present)
2048 {
2049 Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
2050 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2051 }
2052
2053 return VINF_SUCCESS;
2054}
2055
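/*
 * Usage sketch (hypothetical caller, for illustration only): stack-loading
 * instruction helpers such as LSS or POP SS would validate the incoming
 * selector first and only commit it to SS afterwards:
 *
 *      IEMSELDESC DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, uNewSS, pIemCpu->uCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... load the hidden SS parts from DescSS ...
 */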
2056
2057/**
2058 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2059 * not.
2060 *
2061 * @param a_pIemCpu The IEM per CPU data.
2062 * @param a_pCtx The CPU context.
2063 */
2064#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2065# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2066 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2067 ? (a_pCtx)->eflags.u \
2068 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2069#else
2070# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2071 ( (a_pCtx)->eflags.u )
2072#endif
2073
2074/**
2075 * Updates the EFLAGS in the correct manner wrt. PATM.
2076 *
2077 * @param a_pIemCpu The IEM per CPU data.
2078 * @param a_pCtx The CPU context.
2079 * @param a_fEfl The new EFLAGS.
2080 */
2081#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2082# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2083 do { \
2084 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2085 (a_pCtx)->eflags.u = (a_fEfl); \
2086 else \
2087 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2088 } while (0)
2089#else
2090# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2091 do { \
2092 (a_pCtx)->eflags.u = (a_fEfl); \
2093 } while (0)
2094#endif
2095
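/*
 * Typical get/modify/set pattern (as used by the exception code further down
 * in this file); the macros keep PATM-managed flag bits consistent in raw mode:
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */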
2096
2097/** @} */
2098
2099/** @name Raising Exceptions.
2100 *
2101 * @{
2102 */
2103
2104/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2105 * @{ */
2106/** CPU exception. */
2107#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2108/** External interrupt (from PIC, APIC, whatever). */
2109#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2110/** Software interrupt (int or into, not bound).
2111 * Returns to the following instruction. */
2112#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2113/** Takes an error code. */
2114#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2115/** Takes a CR2. */
2116#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2117/** Generated by the breakpoint instruction. */
2118#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2119/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2120#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2121/** @} */
2122
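/*
 * Illustrative combinations (assumptions based on the x86 exception model,
 * not an exhaustive list from the original sources):
 *
 *      // #PF: CPU exception that takes an error code and a CR2 value.
 *      IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
 *      // INT n: software interrupt, returns to the following instruction.
 *      IEM_XCPT_FLAGS_T_SOFT_INT
 *      // External interrupt delivered by the PIC/APIC.
 *      IEM_XCPT_FLAGS_T_EXT_INT
 */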
2123
2124/**
2125 * Loads the specified stack far pointer from the TSS.
2126 *
2127 * @returns VBox strict status code.
2128 * @param pIemCpu The IEM per CPU instance data.
2129 * @param pCtx The CPU context.
2130 * @param uCpl The CPL to load the stack for.
2131 * @param pSelSS Where to return the new stack segment.
2132 * @param puEsp Where to return the new stack pointer.
2133 */
2134IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2135 PRTSEL pSelSS, uint32_t *puEsp)
2136{
2137 VBOXSTRICTRC rcStrict;
2138 Assert(uCpl < 4);
2139
2140 switch (pCtx->tr.Attr.n.u4Type)
2141 {
2142 /*
2143 * 16-bit TSS (X86TSS16).
2144 */
2145 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2146 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2147 {
2148 uint32_t off = uCpl * 4 + 2;
2149 if (off + 4 <= pCtx->tr.u32Limit)
2150 {
2151 /** @todo check actual access pattern here. */
2152 uint32_t u32Tmp = 0; /* gcc: maybe-uninitialized */
2153 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2154 if (rcStrict == VINF_SUCCESS)
2155 {
2156 *puEsp = RT_LOWORD(u32Tmp);
2157 *pSelSS = RT_HIWORD(u32Tmp);
2158 return VINF_SUCCESS;
2159 }
2160 }
2161 else
2162 {
2163 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2164 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2165 }
2166 break;
2167 }
2168
2169 /*
2170 * 32-bit TSS (X86TSS32).
2171 */
2172 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2173 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2174 {
2175 uint32_t off = uCpl * 8 + 4;
2176 if (off + 7 <= pCtx->tr.u32Limit)
2177 {
2178/** @todo check actual access pattern here. */
2179 uint64_t u64Tmp;
2180 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2181 if (rcStrict == VINF_SUCCESS)
2182 {
2183 *puEsp = u64Tmp & UINT32_MAX;
2184 *pSelSS = (RTSEL)(u64Tmp >> 32);
2185 return VINF_SUCCESS;
2186 }
2187 }
2188 else
2189 {
2190 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2191 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2192 }
2193 break;
2194 }
2195
2196 default:
2197 AssertFailed();
2198 rcStrict = VERR_IEM_IPE_4;
2199 break;
2200 }
2201
2202 *puEsp = 0; /* make gcc happy */
2203 *pSelSS = 0; /* make gcc happy */
2204 return rcStrict;
2205}
2206
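/*
 * Layout note (standard x86 TSS formats, added for clarity): the offsets used
 * above come from the privileged stack fields of the TSS.  A 16-bit TSS keeps
 * SP0/SS0 starting at offset 2 with 4 bytes per ring (hence uCpl * 4 + 2); a
 * 32-bit TSS keeps ESP0/SS0 starting at offset 4 with 8 bytes per ring (hence
 * uCpl * 8 + 4).
 */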
2207
2208/**
2209 * Loads the specified stack pointer from the 64-bit TSS.
2210 *
2211 * @returns VBox strict status code.
2212 * @param pIemCpu The IEM per CPU instance data.
2213 * @param pCtx The CPU context.
2214 * @param uCpl The CPL to load the stack for.
2215 * @param uIst The interrupt stack table index; 0 means use the stack for uCpl.
2216 * @param puRsp Where to return the new stack pointer.
2217 */
2218IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2219{
2220 Assert(uCpl < 4);
2221 Assert(uIst < 8);
2222 *puRsp = 0; /* make gcc happy */
2223
2224 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2225
2226 uint32_t off;
2227 if (uIst)
2228 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2229 else
2230 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2231 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2232 {
2233 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2234 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2235 }
2236
2237 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2238}
2239
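/*
 * Usage sketch (hypothetical caller, for illustration only): a 64-bit
 * interrupt/exception dispatcher passes the gate's IST index when non-zero,
 * otherwise the target CPL, and gets back the matching RSPn/ISTn slot
 * (RSP0 lives at TSS offset 4, IST1 at offset 0x24):
 *
 *      uint64_t uNewRsp;
 *      rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, uIstIndex, &uNewRsp);
 */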
2240
2241/**
2242 * Adjust the CPU state according to the exception being raised.
2243 *
2244 * @param pCtx The CPU context.
2245 * @param u8Vector The exception that has been raised.
2246 */
2247DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2248{
2249 switch (u8Vector)
2250 {
2251 case X86_XCPT_DB:
2252 pCtx->dr[7] &= ~X86_DR7_GD;
2253 break;
2254 /** @todo Read the AMD and Intel exception reference... */
2255 }
2256}
2257
2258
2259/**
2260 * Implements exceptions and interrupts for real mode.
2261 *
2262 * @returns VBox strict status code.
2263 * @param pIemCpu The IEM per CPU instance data.
2264 * @param pCtx The CPU context.
2265 * @param cbInstr The number of bytes to offset rIP by in the return
2266 * address.
2267 * @param u8Vector The interrupt / exception vector number.
2268 * @param fFlags The flags.
2269 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2270 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2271 */
2272IEM_STATIC VBOXSTRICTRC
2273iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2274 PCPUMCTX pCtx,
2275 uint8_t cbInstr,
2276 uint8_t u8Vector,
2277 uint32_t fFlags,
2278 uint16_t uErr,
2279 uint64_t uCr2)
2280{
2281 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2282 NOREF(uErr); NOREF(uCr2);
2283
2284 /*
2285 * Read the IDT entry.
2286 */
2287 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2288 {
2289 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2290 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2291 }
2292 RTFAR16 Idte;
2293 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2294 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2295 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2296 return rcStrict;
2297
2298 /*
2299 * Push the stack frame.
2300 */
2301 uint16_t *pu16Frame;
2302 uint64_t uNewRsp;
2303 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2304 if (rcStrict != VINF_SUCCESS)
2305 return rcStrict;
2306
2307 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2308 pu16Frame[2] = (uint16_t)fEfl;
2309 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2310 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2311 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2312 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2313 return rcStrict;
2314
2315 /*
2316 * Load the vector address into cs:ip and make exception specific state
2317 * adjustments.
2318 */
2319 pCtx->cs.Sel = Idte.sel;
2320 pCtx->cs.ValidSel = Idte.sel;
2321 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2322 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2323 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2324 pCtx->rip = Idte.off;
2325 fEfl &= ~X86_EFL_IF;
2326 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2327
2328 /** @todo do we actually do this in real mode? */
2329 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2330 iemRaiseXcptAdjustState(pCtx, u8Vector);
2331
2332 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2333}
2334
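/*
 * Layout note (standard real-mode behaviour, added for clarity): each IVT
 * entry is a 4-byte far pointer (offset word at vector*4, segment word at
 * vector*4 + 2), and the 6-byte frame pushed above ends up as:
 *
 *      new SP + 0:  return IP
 *      new SP + 2:  return CS
 *      new SP + 4:  FLAGS (IF is cleared in the live EFLAGS afterwards)
 */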
2335
2336/**
2337 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2338 *
2339 * @param pIemCpu The IEM per CPU instance data.
2340 * @param pSReg Pointer to the segment register.
2341 */
2342IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2343{
2344 pSReg->Sel = 0;
2345 pSReg->ValidSel = 0;
2346 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2347 {
2348 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2349 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2350 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2351 }
2352 else
2353 {
2354 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2355 /** @todo check this on AMD-V */
2356 pSReg->u64Base = 0;
2357 pSReg->u32Limit = 0;
2358 }
2359}
2360
2361
2362/**
2363 * Loads a segment selector during a task switch in V8086 mode.
2364 *
2365 * @param pIemCpu The IEM per CPU instance data.
2366 * @param pSReg Pointer to the segment register.
2367 * @param uSel The selector value to load.
2368 */
2369IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2370{
2371 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2372 pSReg->Sel = uSel;
2373 pSReg->ValidSel = uSel;
2374 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2375 pSReg->u64Base = uSel << 4;
2376 pSReg->u32Limit = 0xffff;
2377 pSReg->Attr.u = 0xf3;
2378}
2379
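/*
 * Worked example (illustrative): loading selector 0x1234 in V8086 mode yields
 * base 0x12340 (selector << 4), limit 0xffff and attributes 0xf3, i.e. a
 * present, DPL=3, accessed read/write data segment, matching the assignments
 * above.
 */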
2380
2381/**
2382 * Loads a NULL data selector into a selector register, both the hidden and
2383 * visible parts, in protected mode.
2384 *
2385 * @param pIemCpu The IEM state of the calling EMT.
2386 * @param pSReg Pointer to the segment register.
2387 * @param uRpl The RPL.
2388 */
2389IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2390{
2391 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2392 * data selector in protected mode. */
2393 pSReg->Sel = uRpl;
2394 pSReg->ValidSel = uRpl;
2395 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2396 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2397 {
2398 /* VT-x (Intel 3960x) was observed doing something like this. */
2399 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2400 pSReg->u32Limit = UINT32_MAX;
2401 pSReg->u64Base = 0;
2402 }
2403 else
2404 {
2405 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2406 pSReg->u32Limit = 0;
2407 pSReg->u64Base = 0;
2408 }
2409}
2410
2411
2412/**
2413 * Loads a segment selector during a task switch in protected mode.
2414 *
2415 * In this task switch scenario, we would throw \#TS exceptions rather than
2416 * \#GPs.
2417 *
2418 * @returns VBox strict status code.
2419 * @param pIemCpu The IEM per CPU instance data.
2420 * @param pSReg Pointer to the segment register.
2421 * @param uSel The new selector value.
2422 *
2423 * @remarks This does _not_ handle CS or SS.
2424 * @remarks This expects pIemCpu->uCpl to be up to date.
2425 */
2426IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2427{
2428 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2429
2430 /* Null data selector. */
2431 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2432 {
2433 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2434 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2435 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2436 return VINF_SUCCESS;
2437 }
2438
2439 /* Fetch the descriptor. */
2440 IEMSELDESC Desc;
2441 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2442 if (rcStrict != VINF_SUCCESS)
2443 {
2444 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2445 VBOXSTRICTRC_VAL(rcStrict)));
2446 return rcStrict;
2447 }
2448
2449 /* Must be a data segment or readable code segment. */
2450 if ( !Desc.Legacy.Gen.u1DescType
2451 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2452 {
2453 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2454 Desc.Legacy.Gen.u4Type));
2455 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2456 }
2457
2458 /* Check privileges for data segments and non-conforming code segments. */
2459 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2460 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2461 {
2462 /* The RPL and the new CPL must be less than or equal to the DPL. */
2463 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2464 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2465 {
2466 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2467 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2468 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2469 }
2470 }
2471
2472 /* Is it there? */
2473 if (!Desc.Legacy.Gen.u1Present)
2474 {
2475 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2476 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2477 }
2478
2479 /* The base and limit. */
2480 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2481 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2482
2483 /*
2484 * Ok, everything checked out fine. Now set the accessed bit before
2485 * committing the result into the registers.
2486 */
2487 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2488 {
2489 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2490 if (rcStrict != VINF_SUCCESS)
2491 return rcStrict;
2492 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2493 }
2494
2495 /* Commit */
2496 pSReg->Sel = uSel;
2497 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2498 pSReg->u32Limit = cbLimit;
2499 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2500 pSReg->ValidSel = uSel;
2501 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2502 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2503 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2504
2505 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2506 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2507 return VINF_SUCCESS;
2508}
2509
2510
2511/**
2512 * Performs a task switch.
2513 *
2514 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2515 * caller is responsible for performing the necessary checks (like DPL, TSS
2516 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2517 * reference for JMP, CALL, IRET.
2518 *
2519 * If the task switch is due to a software interrupt or hardware exception,
2520 * the caller is responsible for validating the TSS selector and descriptor. See
2521 * Intel Instruction reference for INT n.
2522 *
2523 * @returns VBox strict status code.
2524 * @param pIemCpu The IEM per CPU instance data.
2525 * @param pCtx The CPU context.
2526 * @param enmTaskSwitch What caused this task switch.
2527 * @param uNextEip The EIP effective after the task switch.
2528 * @param fFlags The flags.
2529 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2530 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2531 * @param SelTSS The TSS selector of the new task.
2532 * @param pNewDescTSS Pointer to the new TSS descriptor.
2533 */
2534IEM_STATIC VBOXSTRICTRC
2535iemTaskSwitch(PIEMCPU pIemCpu,
2536 PCPUMCTX pCtx,
2537 IEMTASKSWITCH enmTaskSwitch,
2538 uint32_t uNextEip,
2539 uint32_t fFlags,
2540 uint16_t uErr,
2541 uint64_t uCr2,
2542 RTSEL SelTSS,
2543 PIEMSELDESC pNewDescTSS)
2544{
2545 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2546 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2547
2548 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2549 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2550 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2551 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2552 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2553
2554 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2555 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2556
2557 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2558 fIsNewTSS386, pCtx->eip, uNextEip));
2559
2560 /* Update CR2 in case it's a page-fault. */
2561 /** @todo This should probably be done much earlier in IEM/PGM. See
2562 * @bugref{5653#c49}. */
2563 if (fFlags & IEM_XCPT_FLAGS_CR2)
2564 pCtx->cr2 = uCr2;
2565
2566 /*
2567 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2568 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2569 */
2570 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2571 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2572 if (uNewTSSLimit < uNewTSSLimitMin)
2573 {
2574 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2575 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2576 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2577 }
2578
2579 /*
2580 * Check the current TSS limit. The last bytes written to the current TSS during the
2581 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2582 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2583 *
2584 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2585 * end up with smaller than "legal" TSS limits.
2586 */
2587 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2588 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2589 if (uCurTSSLimit < uCurTSSLimitMin)
2590 {
2591 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2592 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2593 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2594 }
2595
2596 /*
2597 * Verify that the new TSS can be accessed and map it. Map only the required contents
2598 * and not the entire TSS.
2599 */
2600 void *pvNewTSS;
2601 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2602 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2603 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2604 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2605 * not perform correct translation if this happens. See Intel spec. 7.2.1
2606 * "Task-State Segment" */
2607 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2608 if (rcStrict != VINF_SUCCESS)
2609 {
2610 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2611 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2612 return rcStrict;
2613 }
2614
2615 /*
2616 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2617 */
2618 uint32_t u32EFlags = pCtx->eflags.u32;
2619 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2620 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2621 {
2622 PX86DESC pDescCurTSS;
2623 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2624 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2625 if (rcStrict != VINF_SUCCESS)
2626 {
2627 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2628 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2629 return rcStrict;
2630 }
2631
2632 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2633 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2634 if (rcStrict != VINF_SUCCESS)
2635 {
2636 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2637 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2638 return rcStrict;
2639 }
2640
2641 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2642 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2643 {
2644 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2645 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2646 u32EFlags &= ~X86_EFL_NT;
2647 }
2648 }
2649
2650 /*
2651 * Save the CPU state into the current TSS.
2652 */
2653 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2654 if (GCPtrNewTSS == GCPtrCurTSS)
2655 {
2656 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2657 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2658 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2659 }
2660 if (fIsNewTSS386)
2661 {
2662 /*
2663 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2664 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2665 */
2666 void *pvCurTSS32;
2667 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2668 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2669 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2670 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2671 if (rcStrict != VINF_SUCCESS)
2672 {
2673 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2674 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2675 return rcStrict;
2676 }
2677
2678 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2679 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2680 pCurTSS32->eip = uNextEip;
2681 pCurTSS32->eflags = u32EFlags;
2682 pCurTSS32->eax = pCtx->eax;
2683 pCurTSS32->ecx = pCtx->ecx;
2684 pCurTSS32->edx = pCtx->edx;
2685 pCurTSS32->ebx = pCtx->ebx;
2686 pCurTSS32->esp = pCtx->esp;
2687 pCurTSS32->ebp = pCtx->ebp;
2688 pCurTSS32->esi = pCtx->esi;
2689 pCurTSS32->edi = pCtx->edi;
2690 pCurTSS32->es = pCtx->es.Sel;
2691 pCurTSS32->cs = pCtx->cs.Sel;
2692 pCurTSS32->ss = pCtx->ss.Sel;
2693 pCurTSS32->ds = pCtx->ds.Sel;
2694 pCurTSS32->fs = pCtx->fs.Sel;
2695 pCurTSS32->gs = pCtx->gs.Sel;
2696
2697 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2698 if (rcStrict != VINF_SUCCESS)
2699 {
2700 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2701 VBOXSTRICTRC_VAL(rcStrict)));
2702 return rcStrict;
2703 }
2704 }
2705 else
2706 {
2707 /*
2708 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2709 */
2710 void *pvCurTSS16;
2711 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2712 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2713 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2714 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2715 if (rcStrict != VINF_SUCCESS)
2716 {
2717 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2718 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2719 return rcStrict;
2720 }
2721
2722 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2723 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2724 pCurTSS16->ip = uNextEip;
2725 pCurTSS16->flags = u32EFlags;
2726 pCurTSS16->ax = pCtx->ax;
2727 pCurTSS16->cx = pCtx->cx;
2728 pCurTSS16->dx = pCtx->dx;
2729 pCurTSS16->bx = pCtx->bx;
2730 pCurTSS16->sp = pCtx->sp;
2731 pCurTSS16->bp = pCtx->bp;
2732 pCurTSS16->si = pCtx->si;
2733 pCurTSS16->di = pCtx->di;
2734 pCurTSS16->es = pCtx->es.Sel;
2735 pCurTSS16->cs = pCtx->cs.Sel;
2736 pCurTSS16->ss = pCtx->ss.Sel;
2737 pCurTSS16->ds = pCtx->ds.Sel;
2738
2739 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2740 if (rcStrict != VINF_SUCCESS)
2741 {
2742 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2743 VBOXSTRICTRC_VAL(rcStrict)));
2744 return rcStrict;
2745 }
2746 }
2747
2748 /*
2749 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2750 */
2751 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2752 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2753 {
2754 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2755 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2756 pNewTSS->selPrev = pCtx->tr.Sel;
2757 }
2758
2759 /*
2760 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2761 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2762 */
2763 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2764 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2765 bool fNewDebugTrap;
2766 if (fIsNewTSS386)
2767 {
2768 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2769 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2770 uNewEip = pNewTSS32->eip;
2771 uNewEflags = pNewTSS32->eflags;
2772 uNewEax = pNewTSS32->eax;
2773 uNewEcx = pNewTSS32->ecx;
2774 uNewEdx = pNewTSS32->edx;
2775 uNewEbx = pNewTSS32->ebx;
2776 uNewEsp = pNewTSS32->esp;
2777 uNewEbp = pNewTSS32->ebp;
2778 uNewEsi = pNewTSS32->esi;
2779 uNewEdi = pNewTSS32->edi;
2780 uNewES = pNewTSS32->es;
2781 uNewCS = pNewTSS32->cs;
2782 uNewSS = pNewTSS32->ss;
2783 uNewDS = pNewTSS32->ds;
2784 uNewFS = pNewTSS32->fs;
2785 uNewGS = pNewTSS32->gs;
2786 uNewLdt = pNewTSS32->selLdt;
2787 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2788 }
2789 else
2790 {
2791 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2792 uNewCr3 = 0;
2793 uNewEip = pNewTSS16->ip;
2794 uNewEflags = pNewTSS16->flags;
2795 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2796 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2797 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2798 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2799 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2800 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2801 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2802 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2803 uNewES = pNewTSS16->es;
2804 uNewCS = pNewTSS16->cs;
2805 uNewSS = pNewTSS16->ss;
2806 uNewDS = pNewTSS16->ds;
2807 uNewFS = 0;
2808 uNewGS = 0;
2809 uNewLdt = pNewTSS16->selLdt;
2810 fNewDebugTrap = false;
2811 }
2812
2813 if (GCPtrNewTSS == GCPtrCurTSS)
2814 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2815 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2816
2817 /*
2818 * We're done accessing the new TSS.
2819 */
2820 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2821 if (rcStrict != VINF_SUCCESS)
2822 {
2823 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2824 return rcStrict;
2825 }
2826
2827 /*
2828 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2829 */
2830 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2831 {
2832 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2833 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2834 if (rcStrict != VINF_SUCCESS)
2835 {
2836 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2837 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2838 return rcStrict;
2839 }
2840
2841 /* Check that the descriptor indicates the new TSS is available (not busy). */
2842 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2843 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2844 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2845
2846 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2847 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2848 if (rcStrict != VINF_SUCCESS)
2849 {
2850 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2851 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2852 return rcStrict;
2853 }
2854 }
2855
2856 /*
2857 * From this point on, we're technically in the new task. Exceptions raised from here on
2858 * are deferred until the task switch completes and are delivered before any instruction executes in the new task.
2859 */
2860 pCtx->tr.Sel = SelTSS;
2861 pCtx->tr.ValidSel = SelTSS;
2862 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2863 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2864 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2865 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2866 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2867
2868 /* Set the busy bit in TR. */
2869 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2870 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2871 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2872 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2873 {
2874 uNewEflags |= X86_EFL_NT;
2875 }
2876
2877 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2878 pCtx->cr0 |= X86_CR0_TS;
2879 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2880
2881 pCtx->eip = uNewEip;
2882 pCtx->eax = uNewEax;
2883 pCtx->ecx = uNewEcx;
2884 pCtx->edx = uNewEdx;
2885 pCtx->ebx = uNewEbx;
2886 pCtx->esp = uNewEsp;
2887 pCtx->ebp = uNewEbp;
2888 pCtx->esi = uNewEsi;
2889 pCtx->edi = uNewEdi;
2890
2891 uNewEflags &= X86_EFL_LIVE_MASK;
2892 uNewEflags |= X86_EFL_RA1_MASK;
2893 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2894
2895 /*
2896 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2897 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2898 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2899 */
2900 pCtx->es.Sel = uNewES;
2901 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2902 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2903
2904 pCtx->cs.Sel = uNewCS;
2905 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2906 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2907
2908 pCtx->ss.Sel = uNewSS;
2909 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2910 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2911
2912 pCtx->ds.Sel = uNewDS;
2913 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2914 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2915
2916 pCtx->fs.Sel = uNewFS;
2917 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2918 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2919
2920 pCtx->gs.Sel = uNewGS;
2921 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2922 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2923 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2924
2925 pCtx->ldtr.Sel = uNewLdt;
2926 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2927 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2928 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2929
2930 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2931 {
2932 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2933 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2934 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2935 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2936 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2937 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2938 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2939 }
2940
2941 /*
2942 * Switch CR3 for the new task.
2943 */
2944 if ( fIsNewTSS386
2945 && (pCtx->cr0 & X86_CR0_PG))
2946 {
2947 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2948 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2949 {
2950 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2951 AssertRCSuccessReturn(rc, rc);
2952 }
2953 else
2954 pCtx->cr3 = uNewCr3;
2955
2956 /* Inform PGM. */
2957 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2958 {
2959 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2960 AssertRCReturn(rc, rc);
2961 /* ignore informational status codes */
2962 }
2963 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2964 }
2965
2966 /*
2967 * Switch LDTR for the new task.
2968 */
2969 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2970 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2971 else
2972 {
2973 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2974
2975 IEMSELDESC DescNewLdt;
2976 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2977 if (rcStrict != VINF_SUCCESS)
2978 {
2979 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2980 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2981 return rcStrict;
2982 }
2983 if ( !DescNewLdt.Legacy.Gen.u1Present
2984 || DescNewLdt.Legacy.Gen.u1DescType
2985 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2986 {
2987 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2988 uNewLdt, DescNewLdt.Legacy.u));
2989 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2990 }
2991
2992 pCtx->ldtr.ValidSel = uNewLdt;
2993 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2994 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2995 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2996 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2997 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2998 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2999 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
3000 }
3001
3002 IEMSELDESC DescSS;
3003 if (IEM_IS_V86_MODE(pIemCpu))
3004 {
3005 pIemCpu->uCpl = 3;
3006 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
3007 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
3008 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
3009 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
3010 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
3011 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
3012 }
3013 else
3014 {
3015 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3016
3017 /*
3018 * Load the stack segment for the new task.
3019 */
3020 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3021 {
3022 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3023 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3024 }
3025
3026 /* Fetch the descriptor. */
3027 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
3028 if (rcStrict != VINF_SUCCESS)
3029 {
3030 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3031 VBOXSTRICTRC_VAL(rcStrict)));
3032 return rcStrict;
3033 }
3034
3035 /* SS must be a data segment and writable. */
3036 if ( !DescSS.Legacy.Gen.u1DescType
3037 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3038 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3039 {
3040 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3041 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3042 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3043 }
3044
3045 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3046 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3047 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3048 {
3049 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3050 uNewCpl));
3051 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3052 }
3053
3054 /* Is it there? */
3055 if (!DescSS.Legacy.Gen.u1Present)
3056 {
3057 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3058 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3059 }
3060
3061 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3062 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3063
3064 /* Set the accessed bit before committing the result into SS. */
3065 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3066 {
3067 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3068 if (rcStrict != VINF_SUCCESS)
3069 return rcStrict;
3070 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3071 }
3072
3073 /* Commit SS. */
3074 pCtx->ss.Sel = uNewSS;
3075 pCtx->ss.ValidSel = uNewSS;
3076 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3077 pCtx->ss.u32Limit = cbLimit;
3078 pCtx->ss.u64Base = u64Base;
3079 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3081
3082 /* CPL has changed, update IEM before loading rest of segments. */
3083 pIemCpu->uCpl = uNewCpl;
3084
3085 /*
3086 * Load the data segments for the new task.
3087 */
3088 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3089 if (rcStrict != VINF_SUCCESS)
3090 return rcStrict;
3091 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3092 if (rcStrict != VINF_SUCCESS)
3093 return rcStrict;
3094 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3095 if (rcStrict != VINF_SUCCESS)
3096 return rcStrict;
3097 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3098 if (rcStrict != VINF_SUCCESS)
3099 return rcStrict;
3100
3101 /*
3102 * Load the code segment for the new task.
3103 */
3104 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3105 {
3106 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3107 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3108 }
3109
3110 /* Fetch the descriptor. */
3111 IEMSELDESC DescCS;
3112 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3113 if (rcStrict != VINF_SUCCESS)
3114 {
3115 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3116 return rcStrict;
3117 }
3118
3119 /* CS must be a code segment. */
3120 if ( !DescCS.Legacy.Gen.u1DescType
3121 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3122 {
3123 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3124 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3125 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3126 }
3127
3128 /* For conforming CS, DPL must be less than or equal to the RPL. */
3129 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3130 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3131 {
3132        Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3133 DescCS.Legacy.Gen.u2Dpl));
3134 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3135 }
3136
3137 /* For non-conforming CS, DPL must match RPL. */
3138 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3139 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3140 {
3141        Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3142 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3143 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3144 }
3145
3146 /* Is it there? */
3147 if (!DescCS.Legacy.Gen.u1Present)
3148 {
3149 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3150 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3151 }
3152
3153 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3154 u64Base = X86DESC_BASE(&DescCS.Legacy);
3155
3156 /* Set the accessed bit before committing the result into CS. */
3157 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3158 {
3159 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3160 if (rcStrict != VINF_SUCCESS)
3161 return rcStrict;
3162 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3163 }
3164
3165 /* Commit CS. */
3166 pCtx->cs.Sel = uNewCS;
3167 pCtx->cs.ValidSel = uNewCS;
3168 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3169 pCtx->cs.u32Limit = cbLimit;
3170 pCtx->cs.u64Base = u64Base;
3171 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3173 }
3174
3175 /** @todo Debug trap. */
3176 if (fIsNewTSS386 && fNewDebugTrap)
3177 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3178
3179 /*
3180 * Construct the error code masks based on what caused this task switch.
3181 * See Intel Instruction reference for INT.
3182 */
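        /* uExt becomes the EXT bit of any error code raised while finishing this task switch: set when the switch was
           triggered by a hardware interrupt or CPU exception, clear for software INT n and direct CALL/JMP switches. */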
3183 uint16_t uExt;
3184 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3185 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3186 {
3187 uExt = 1;
3188 }
3189 else
3190 uExt = 0;
3191
3192 /*
3193 * Push any error code on to the new stack.
3194 */
3195 if (fFlags & IEM_XCPT_FLAGS_ERR)
3196 {
3197 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3198 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3199 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3200
3201 /* Check that there is sufficient space on the stack. */
3202 /** @todo Factor out segment limit checking for normal/expand down segments
3203 * into a separate function. */
3204 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3205 {
3206 if ( pCtx->esp - 1 > cbLimitSS
3207 || pCtx->esp < cbStackFrame)
3208 {
3209 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3210 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3211 cbStackFrame));
3212 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3213 }
3214 }
3215 else
3216 {
3217            if (   pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3218 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3219 {
3220 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3221 cbStackFrame));
3222 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3223 }
3224 }
3225
3226
3227 if (fIsNewTSS386)
3228 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3229 else
3230 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3231 if (rcStrict != VINF_SUCCESS)
3232 {
3233 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3234 VBOXSTRICTRC_VAL(rcStrict)));
3235 return rcStrict;
3236 }
3237 }
3238
3239 /* Check the new EIP against the new CS limit. */
3240 if (pCtx->eip > pCtx->cs.u32Limit)
3241 {
3242        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3243 pCtx->eip, pCtx->cs.u32Limit));
3244 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3245 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3246 }
3247
3248 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3249 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3250}
3251
3252
3253/**
3254 * Implements exceptions and interrupts for protected mode.
3255 *
3256 * @returns VBox strict status code.
3257 * @param pIemCpu The IEM per CPU instance data.
3258 * @param pCtx The CPU context.
3259 * @param cbInstr The number of bytes to offset rIP by in the return
3260 * address.
3261 * @param u8Vector The interrupt / exception vector number.
3262 * @param fFlags The flags.
3263 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3264 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3265 */
3266IEM_STATIC VBOXSTRICTRC
3267iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3268 PCPUMCTX pCtx,
3269 uint8_t cbInstr,
3270 uint8_t u8Vector,
3271 uint32_t fFlags,
3272 uint16_t uErr,
3273 uint64_t uCr2)
3274{
3275 /*
3276 * Read the IDT entry.
3277 */
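        /* Protected-mode IDT entries are 8 bytes; the limit check below makes sure the whole gate for this vector lies inside the IDT. */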
3278 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3279 {
3280 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3281 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3282 }
3283 X86DESC Idte;
3284 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3285 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3286 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3287 return rcStrict;
3288 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3289 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3290 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3291
3292 /*
3293 * Check the descriptor type, DPL and such.
3294 * ASSUMES this is done in the same order as described for call-gate calls.
3295 */
3296 if (Idte.Gate.u1DescType)
3297 {
3298 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3299 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3300 }
3301 bool fTaskGate = false;
3302 uint8_t f32BitGate = true;
3303 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3304 switch (Idte.Gate.u4Type)
3305 {
3306 case X86_SEL_TYPE_SYS_UNDEFINED:
3307 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3308 case X86_SEL_TYPE_SYS_LDT:
3309 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3310 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3311 case X86_SEL_TYPE_SYS_UNDEFINED2:
3312 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3313 case X86_SEL_TYPE_SYS_UNDEFINED3:
3314 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3315 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3316 case X86_SEL_TYPE_SYS_UNDEFINED4:
3317 {
3318 /** @todo check what actually happens when the type is wrong...
3319 * esp. call gates. */
3320 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3321 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3322 }
3323
3324 case X86_SEL_TYPE_SYS_286_INT_GATE:
3325 f32BitGate = false;
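                /* fall thru - a 286 interrupt gate behaves like a 386 one, only with 16-bit pushes. */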
3326 case X86_SEL_TYPE_SYS_386_INT_GATE:
3327 fEflToClear |= X86_EFL_IF;
3328 break;
3329
3330 case X86_SEL_TYPE_SYS_TASK_GATE:
3331 fTaskGate = true;
3332#ifndef IEM_IMPLEMENTS_TASKSWITCH
3333 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3334#endif
3335 break;
3336
3337 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3338 f32BitGate = false;
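                /* fall thru - likewise for the 286 trap gate. */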
3339 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3340 break;
3341
3342 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3343 }
3344
3345 /* Check DPL against CPL if applicable. */
3346 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3347 {
3348 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3349 {
3350 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3351 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3352 }
3353 }
3354
3355 /* Is it there? */
3356 if (!Idte.Gate.u1Present)
3357 {
3358 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3359 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3360 }
3361
3362 /* Is it a task-gate? */
3363 if (fTaskGate)
3364 {
3365 /*
3366 * Construct the error code masks based on what caused this task switch.
3367 * See Intel Instruction reference for INT.
3368 */
3369 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3370 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3371 RTSEL SelTSS = Idte.Gate.u16Sel;
3372
3373 /*
3374 * Fetch the TSS descriptor in the GDT.
3375 */
3376 IEMSELDESC DescTSS;
3377 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3378 if (rcStrict != VINF_SUCCESS)
3379 {
3380 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3381 VBOXSTRICTRC_VAL(rcStrict)));
3382 return rcStrict;
3383 }
3384
3385 /* The TSS descriptor must be a system segment and be available (not busy). */
3386 if ( DescTSS.Legacy.Gen.u1DescType
3387 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3388 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3389 {
3390 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3391 u8Vector, SelTSS, DescTSS.Legacy.au64));
3392 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3393 }
3394
3395 /* The TSS must be present. */
3396 if (!DescTSS.Legacy.Gen.u1Present)
3397 {
3398 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3399 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3400 }
3401
3402 /* Do the actual task switch. */
3403 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3404 }
3405
3406 /* A null CS is bad. */
3407 RTSEL NewCS = Idte.Gate.u16Sel;
3408 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3409 {
3410 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3411 return iemRaiseGeneralProtectionFault0(pIemCpu);
3412 }
3413
3414 /* Fetch the descriptor for the new CS. */
3415 IEMSELDESC DescCS;
3416 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3417 if (rcStrict != VINF_SUCCESS)
3418 {
3419 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3420 return rcStrict;
3421 }
3422
3423 /* Must be a code segment. */
3424 if (!DescCS.Legacy.Gen.u1DescType)
3425 {
3426 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3427 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3428 }
3429 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3430 {
3431 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3432 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3433 }
3434
3435 /* Don't allow lowering the privilege level. */
3436 /** @todo Does the lowering of privileges apply to software interrupts
3437 * only? This has bearings on the more-privileged or
3438 * same-privilege stack behavior further down. A testcase would
3439 * be nice. */
3440 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3441 {
3442 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3443 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3444 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3445 }
3446
3447 /* Make sure the selector is present. */
3448 if (!DescCS.Legacy.Gen.u1Present)
3449 {
3450 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3451 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3452 }
3453
3454 /* Check the new EIP against the new CS limit. */
3455 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3456 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3457 ? Idte.Gate.u16OffsetLow
3458 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3459 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3460 if (uNewEip > cbLimitCS)
3461 {
3462 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3463 u8Vector, uNewEip, cbLimitCS, NewCS));
3464 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3465 }
3466
3467 /* Calc the flag image to push. */
3468 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3469 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3470 fEfl &= ~X86_EFL_RF;
3471 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3472 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3473
3474 /* From V8086 mode only go to CPL 0. */
3475 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3476 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3477 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3478 {
3479 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3480 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3481 }
3482
3483 /*
3484 * If the privilege level changes, we need to get a new stack from the TSS.
3485 * This in turns means validating the new SS and ESP...
3486 */
3487 if (uNewCpl != pIemCpu->uCpl)
3488 {
3489 RTSEL NewSS;
3490 uint32_t uNewEsp;
3491 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3492 if (rcStrict != VINF_SUCCESS)
3493 return rcStrict;
3494
3495 IEMSELDESC DescSS;
3496 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3497 if (rcStrict != VINF_SUCCESS)
3498 return rcStrict;
3499
3500 /* Check that there is sufficient space for the stack frame. */
3501 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3502 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3503 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3504 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
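            /* Frame contents: [err,] EIP, CS, EFLAGS, ESP, SS (5-6 entries), plus ES, DS, FS and GS when coming from
               V8086 mode (9-10 entries); the shift doubles the byte count for a 32-bit gate. */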
3505
3506 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3507 {
3508 if ( uNewEsp - 1 > cbLimitSS
3509 || uNewEsp < cbStackFrame)
3510 {
3511 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3512 u8Vector, NewSS, uNewEsp, cbStackFrame));
3513 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3514 }
3515 }
3516 else
3517 {
3518            if (   uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3519 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3520 {
3521 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3522 u8Vector, NewSS, uNewEsp, cbStackFrame));
3523 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3524 }
3525 }
3526
3527 /*
3528 * Start making changes.
3529 */
3530
3531 /* Create the stack frame. */
3532 RTPTRUNION uStackFrame;
3533 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3534 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3535 if (rcStrict != VINF_SUCCESS)
3536 return rcStrict;
3537 void * const pvStackFrame = uStackFrame.pv;
3538 if (f32BitGate)
3539 {
3540 if (fFlags & IEM_XCPT_FLAGS_ERR)
3541 *uStackFrame.pu32++ = uErr;
3542 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3543 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3544 uStackFrame.pu32[2] = fEfl;
3545 uStackFrame.pu32[3] = pCtx->esp;
3546 uStackFrame.pu32[4] = pCtx->ss.Sel;
3547 if (fEfl & X86_EFL_VM)
3548 {
3549 uStackFrame.pu32[1] = pCtx->cs.Sel;
3550 uStackFrame.pu32[5] = pCtx->es.Sel;
3551 uStackFrame.pu32[6] = pCtx->ds.Sel;
3552 uStackFrame.pu32[7] = pCtx->fs.Sel;
3553 uStackFrame.pu32[8] = pCtx->gs.Sel;
3554 }
3555 }
3556 else
3557 {
3558 if (fFlags & IEM_XCPT_FLAGS_ERR)
3559 *uStackFrame.pu16++ = uErr;
3560 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3561 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3562 uStackFrame.pu16[2] = fEfl;
3563 uStackFrame.pu16[3] = pCtx->sp;
3564 uStackFrame.pu16[4] = pCtx->ss.Sel;
3565 if (fEfl & X86_EFL_VM)
3566 {
3567 uStackFrame.pu16[1] = pCtx->cs.Sel;
3568 uStackFrame.pu16[5] = pCtx->es.Sel;
3569 uStackFrame.pu16[6] = pCtx->ds.Sel;
3570 uStackFrame.pu16[7] = pCtx->fs.Sel;
3571 uStackFrame.pu16[8] = pCtx->gs.Sel;
3572 }
3573 }
3574 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3575 if (rcStrict != VINF_SUCCESS)
3576 return rcStrict;
3577
3578 /* Mark the selectors 'accessed' (hope this is the correct time). */
3579        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3580 * after pushing the stack frame? (Write protect the gdt + stack to
3581 * find out.) */
3582 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3583 {
3584 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3585 if (rcStrict != VINF_SUCCESS)
3586 return rcStrict;
3587 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3588 }
3589
3590 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3591 {
3592 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3593 if (rcStrict != VINF_SUCCESS)
3594 return rcStrict;
3595 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3596 }
3597
3598 /*
3599         * Start committing the register changes (joins with the DPL=CPL branch).
3600 */
3601 pCtx->ss.Sel = NewSS;
3602 pCtx->ss.ValidSel = NewSS;
3603 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3604 pCtx->ss.u32Limit = cbLimitSS;
3605 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3606 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3607 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3608 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3609 * SP is loaded).
3610 * Need to check the other combinations too:
3611 * - 16-bit TSS, 32-bit handler
3612 * - 32-bit TSS, 16-bit handler */
3613 if (!pCtx->ss.Attr.n.u1DefBig)
3614 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
3615 else
3616 pCtx->rsp = uNewEsp - cbStackFrame;
3617 pIemCpu->uCpl = uNewCpl;
3618
3619 if (fEfl & X86_EFL_VM)
3620 {
3621 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3622 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3623 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3624 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3625 }
3626 }
3627 /*
3628 * Same privilege, no stack change and smaller stack frame.
3629 */
3630 else
3631 {
3632 uint64_t uNewRsp;
3633 RTPTRUNION uStackFrame;
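            /* No stack switch here, so the frame is just [err,] EIP, CS and EFLAGS - words for a 16-bit gate, dwords for a 32-bit one. */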
3634 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3635 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3636 if (rcStrict != VINF_SUCCESS)
3637 return rcStrict;
3638 void * const pvStackFrame = uStackFrame.pv;
3639
3640 if (f32BitGate)
3641 {
3642 if (fFlags & IEM_XCPT_FLAGS_ERR)
3643 *uStackFrame.pu32++ = uErr;
3644 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3645 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3646 uStackFrame.pu32[2] = fEfl;
3647 }
3648 else
3649 {
3650 if (fFlags & IEM_XCPT_FLAGS_ERR)
3651 *uStackFrame.pu16++ = uErr;
3652 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3653 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3654 uStackFrame.pu16[2] = fEfl;
3655 }
3656 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3657 if (rcStrict != VINF_SUCCESS)
3658 return rcStrict;
3659
3660 /* Mark the CS selector as 'accessed'. */
3661 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3662 {
3663 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3664 if (rcStrict != VINF_SUCCESS)
3665 return rcStrict;
3666 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3667 }
3668
3669 /*
3670 * Start committing the register changes (joins with the other branch).
3671 */
3672 pCtx->rsp = uNewRsp;
3673 }
3674
3675 /* ... register committing continues. */
3676 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3677 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3678 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3679 pCtx->cs.u32Limit = cbLimitCS;
3680 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3681 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3682
3683 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3684 fEfl &= ~fEflToClear;
3685 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3686
3687 if (fFlags & IEM_XCPT_FLAGS_CR2)
3688 pCtx->cr2 = uCr2;
3689
3690 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3691 iemRaiseXcptAdjustState(pCtx, u8Vector);
3692
3693 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3694}
3695
3696
3697/**
3698 * Implements exceptions and interrupts for long mode.
3699 *
3700 * @returns VBox strict status code.
3701 * @param pIemCpu The IEM per CPU instance data.
3702 * @param pCtx The CPU context.
3703 * @param cbInstr The number of bytes to offset rIP by in the return
3704 * address.
3705 * @param u8Vector The interrupt / exception vector number.
3706 * @param fFlags The flags.
3707 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3708 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3709 */
3710IEM_STATIC VBOXSTRICTRC
3711iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3712 PCPUMCTX pCtx,
3713 uint8_t cbInstr,
3714 uint8_t u8Vector,
3715 uint32_t fFlags,
3716 uint16_t uErr,
3717 uint64_t uCr2)
3718{
3719 /*
3720 * Read the IDT entry.
3721 */
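        /* Long-mode IDT entries are 16 bytes each, hence the vector * 16 offset. */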
3722 uint16_t offIdt = (uint16_t)u8Vector << 4;
3723 if (pCtx->idtr.cbIdt < offIdt + 7)
3724 {
3725 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3726 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3727 }
3728 X86DESC64 Idte;
3729 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3730 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3731 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3732 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3733 return rcStrict;
3734 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3735 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3736 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3737
3738 /*
3739 * Check the descriptor type, DPL and such.
3740 * ASSUMES this is done in the same order as described for call-gate calls.
3741 */
3742 if (Idte.Gate.u1DescType)
3743 {
3744 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3745 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3746 }
3747 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3748 switch (Idte.Gate.u4Type)
3749 {
3750 case AMD64_SEL_TYPE_SYS_INT_GATE:
3751 fEflToClear |= X86_EFL_IF;
3752 break;
3753 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3754 break;
3755
3756 default:
3757 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3758 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3759 }
3760
3761 /* Check DPL against CPL if applicable. */
3762 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3763 {
3764 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3765 {
3766 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3767 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3768 }
3769 }
3770
3771 /* Is it there? */
3772 if (!Idte.Gate.u1Present)
3773 {
3774 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3775 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3776 }
3777
3778 /* A null CS is bad. */
3779 RTSEL NewCS = Idte.Gate.u16Sel;
3780 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3781 {
3782 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3783 return iemRaiseGeneralProtectionFault0(pIemCpu);
3784 }
3785
3786 /* Fetch the descriptor for the new CS. */
3787 IEMSELDESC DescCS;
3788 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3789 if (rcStrict != VINF_SUCCESS)
3790 {
3791 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3792 return rcStrict;
3793 }
3794
3795 /* Must be a 64-bit code segment. */
3796 if (!DescCS.Long.Gen.u1DescType)
3797 {
3798 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3799 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3800 }
3801 if ( !DescCS.Long.Gen.u1Long
3802 || DescCS.Long.Gen.u1DefBig
3803 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3804 {
3805 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3806 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3807 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3808 }
3809
3810 /* Don't allow lowering the privilege level. For non-conforming CS
3811 selectors, the CS.DPL sets the privilege level the trap/interrupt
3812 handler runs at. For conforming CS selectors, the CPL remains
3813 unchanged, but the CS.DPL must be <= CPL. */
3814 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3815 * when CPU in Ring-0. Result \#GP? */
3816 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3817 {
3818 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3819 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3820 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3821 }
3822
3823
3824 /* Make sure the selector is present. */
3825 if (!DescCS.Legacy.Gen.u1Present)
3826 {
3827 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3828 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3829 }
3830
3831 /* Check that the new RIP is canonical. */
3832 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3833 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3834 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3835 if (!IEM_IS_CANONICAL(uNewRip))
3836 {
3837 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3838 return iemRaiseGeneralProtectionFault0(pIemCpu);
3839 }
3840
3841 /*
3842 * If the privilege level changes or if the IST isn't zero, we need to get
3843 * a new stack from the TSS.
3844 */
3845 uint64_t uNewRsp;
3846 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3847 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3848 if ( uNewCpl != pIemCpu->uCpl
3849 || Idte.Gate.u3IST != 0)
3850 {
3851 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3852 if (rcStrict != VINF_SUCCESS)
3853 return rcStrict;
3854 }
3855 else
3856 uNewRsp = pCtx->rsp;
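        /* The CPU aligns the stack pointer on a 16-byte boundary before pushing the frame in 64-bit mode. */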
3857 uNewRsp &= ~(uint64_t)0xf;
3858
3859 /*
3860 * Calc the flag image to push.
3861 */
3862 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3863 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3864 fEfl &= ~X86_EFL_RF;
3865 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3866 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3867
3868 /*
3869 * Start making changes.
3870 */
3871
3872 /* Create the stack frame. */
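        /* 64-bit mode always pushes SS:RSP, so the frame is 5 qwords (SS, RSP, RFLAGS, CS, RIP) plus one more for the error code when present. */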
3873 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3874 RTPTRUNION uStackFrame;
3875 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3876 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3877 if (rcStrict != VINF_SUCCESS)
3878 return rcStrict;
3879 void * const pvStackFrame = uStackFrame.pv;
3880
3881 if (fFlags & IEM_XCPT_FLAGS_ERR)
3882 *uStackFrame.pu64++ = uErr;
3883 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3884 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3885 uStackFrame.pu64[2] = fEfl;
3886 uStackFrame.pu64[3] = pCtx->rsp;
3887 uStackFrame.pu64[4] = pCtx->ss.Sel;
3888 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3889 if (rcStrict != VINF_SUCCESS)
3890 return rcStrict;
3891
3892    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3893    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3894 * after pushing the stack frame? (Write protect the gdt + stack to
3895 * find out.) */
3896 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3897 {
3898 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3899 if (rcStrict != VINF_SUCCESS)
3900 return rcStrict;
3901 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3902 }
3903
3904 /*
3905     * Start committing the register changes.
3906 */
3907 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3908 * hidden registers when interrupting 32-bit or 16-bit code! */
3909 if (uNewCpl != pIemCpu->uCpl)
3910 {
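            /* A privilege change in 64-bit mode loads SS with a NULL selector whose RPL is the new CPL; the hidden parts are marked unusable. */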
3911 pCtx->ss.Sel = 0 | uNewCpl;
3912 pCtx->ss.ValidSel = 0 | uNewCpl;
3913 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3914 pCtx->ss.u32Limit = UINT32_MAX;
3915 pCtx->ss.u64Base = 0;
3916 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3917 }
3918 pCtx->rsp = uNewRsp - cbStackFrame;
3919 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3920 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3921 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3922 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3923 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3924 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3925 pCtx->rip = uNewRip;
3926 pIemCpu->uCpl = uNewCpl;
3927
3928 fEfl &= ~fEflToClear;
3929 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3930
3931 if (fFlags & IEM_XCPT_FLAGS_CR2)
3932 pCtx->cr2 = uCr2;
3933
3934 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3935 iemRaiseXcptAdjustState(pCtx, u8Vector);
3936
3937 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3938}
3939
3940
3941/**
3942 * Implements exceptions and interrupts.
3943 *
3944 * All exceptions and interrupts go through this function!
3945 *
3946 * @returns VBox strict status code.
3947 * @param pIemCpu The IEM per CPU instance data.
3948 * @param cbInstr The number of bytes to offset rIP by in the return
3949 * address.
3950 * @param u8Vector The interrupt / exception vector number.
3951 * @param fFlags The flags.
3952 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3953 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3954 */
3955DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3956iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3957 uint8_t cbInstr,
3958 uint8_t u8Vector,
3959 uint32_t fFlags,
3960 uint16_t uErr,
3961 uint64_t uCr2)
3962{
3963 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3964#ifdef IN_RING0
3965 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3966 AssertRCReturn(rc, rc);
3967#endif
3968
3969 /*
3970 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3971 */
3972 if ( pCtx->eflags.Bits.u1VM
3973 && pCtx->eflags.Bits.u2IOPL != 3
3974 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3975 && (pCtx->cr0 & X86_CR0_PE) )
3976 {
3977 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3978 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3979 u8Vector = X86_XCPT_GP;
3980 uErr = 0;
3981 }
3982#ifdef DBGFTRACE_ENABLED
3983 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3984 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3985 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3986#endif
3987
3988 /*
3989 * Do recursion accounting.
3990 */
3991 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3992 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3993 if (pIemCpu->cXcptRecursions == 0)
3994 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3995 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3996 else
3997 {
3998 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3999 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
4000
4001        /** @todo double and triple faults. */
4002 if (pIemCpu->cXcptRecursions >= 3)
4003 {
4004#ifdef DEBUG_bird
4005 AssertFailed();
4006#endif
4007 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4008 }
4009
4010 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4011 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4012 {
4013 ....
4014 } */
4015 }
4016 pIemCpu->cXcptRecursions++;
4017 pIemCpu->uCurXcpt = u8Vector;
4018 pIemCpu->fCurXcpt = fFlags;
4019
4020 /*
4021 * Extensive logging.
4022 */
4023#if defined(LOG_ENABLED) && defined(IN_RING3)
4024 if (LogIs3Enabled())
4025 {
4026 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4027 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4028 char szRegs[4096];
4029 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4030 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4031 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4032 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4033 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4034 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4035 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4036 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4037 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4038 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4039 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4040 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4041 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4042 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4043 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4044 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4045 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4046 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4047 " efer=%016VR{efer}\n"
4048 " pat=%016VR{pat}\n"
4049 " sf_mask=%016VR{sf_mask}\n"
4050 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4051 " lstar=%016VR{lstar}\n"
4052 " star=%016VR{star} cstar=%016VR{cstar}\n"
4053 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4054 );
4055
4056 char szInstr[256];
4057 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4058 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4059 szInstr, sizeof(szInstr), NULL);
4060 Log3(("%s%s\n", szRegs, szInstr));
4061 }
4062#endif /* LOG_ENABLED */
4063
4064 /*
4065 * Call the mode specific worker function.
4066 */
4067 VBOXSTRICTRC rcStrict;
4068 if (!(pCtx->cr0 & X86_CR0_PE))
4069 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4070 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4071 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4072 else
4073 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4074
4075 /*
4076 * Unwind.
4077 */
4078 pIemCpu->cXcptRecursions--;
4079 pIemCpu->uCurXcpt = uPrevXcpt;
4080 pIemCpu->fCurXcpt = fPrevXcpt;
4081 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4082 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4083 return rcStrict;
4084}
4085
4086
4087/** \#DE - 00. */
4088DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4089{
4090 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4091}
4092
4093
4094/** \#DB - 01.
4095 * @note This automatically clears DR7.GD. */
4096DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4097{
4098 /** @todo set/clear RF. */
4099 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4100 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4101}
4102
4103
4104/** \#UD - 06. */
4105DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4106{
4107 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4108}
4109
4110
4111/** \#NM - 07. */
4112DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4113{
4114 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4115}
4116
4117
4118/** \#TS(err) - 0a. */
4119DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4120{
4121 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4122}
4123
4124
4125/** \#TS(tr) - 0a. */
4126DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4127{
4128 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4129 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4130}
4131
4132
4133/** \#TS(0) - 0a. */
4134DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4135{
4136 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4137 0, 0);
4138}
4139
4140
4141/** \#TS(sel) - 0a. */
4142DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4143{
4144 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4145 uSel & X86_SEL_MASK_OFF_RPL, 0);
4146}
4147
4148
4149/** \#NP(err) - 0b. */
4150DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4151{
4152 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4153}
4154
4155
4156/** \#NP(seg) - 0b. */
4157DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4158{
4159 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4160 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4161}
4162
4163
4164/** \#NP(sel) - 0b. */
4165DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4166{
4167 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4168 uSel & ~X86_SEL_RPL, 0);
4169}
4170
4171
4172/** \#SS(seg) - 0c. */
4173DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4174{
4175 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4176 uSel & ~X86_SEL_RPL, 0);
4177}
4178
4179
4180/** \#SS(err) - 0c. */
4181DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4182{
4183 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4184}
4185
4186
4187/** \#GP(n) - 0d. */
4188DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4189{
4190 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4191}
4192
4193
4194/** \#GP(0) - 0d. */
4195DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4196{
4197 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4198}
4199
4200
4201/** \#GP(sel) - 0d. */
4202DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4203{
4204 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4205 Sel & ~X86_SEL_RPL, 0);
4206}
4207
4208
4209/** \#GP(0) - 0d. */
4210DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4211{
4212 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4213}
4214
4215
4216/** \#GP(sel) - 0d. */
4217DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4218{
4219 NOREF(iSegReg); NOREF(fAccess);
4220 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4221 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4222}
4223
4224
4225/** \#GP(sel) - 0d. */
4226DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4227{
4228 NOREF(Sel);
4229 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4230}
4231
4232
4233/** \#GP(sel) - 0d. */
4234DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4235{
4236 NOREF(iSegReg); NOREF(fAccess);
4237 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4238}
4239
4240
4241/** \#PF(n) - 0e. */
4242DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4243{
4244 uint16_t uErr;
4245 switch (rc)
4246 {
4247 case VERR_PAGE_NOT_PRESENT:
4248 case VERR_PAGE_TABLE_NOT_PRESENT:
4249 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4250 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4251 uErr = 0;
4252 break;
4253
4254 default:
4255 AssertMsgFailed(("%Rrc\n", rc));
4256 case VERR_ACCESS_DENIED:
4257 uErr = X86_TRAP_PF_P;
4258 break;
4259
4260 /** @todo reserved */
4261 }
4262
4263 if (pIemCpu->uCpl == 3)
4264 uErr |= X86_TRAP_PF_US;
4265
4266 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4267 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4268 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4269 uErr |= X86_TRAP_PF_ID;
4270
4271#if 0 /* This is so much non-sense, really. Why was it done like that? */
4272 /* Note! RW access callers reporting a WRITE protection fault, will clear
4273 the READ flag before calling. So, read-modify-write accesses (RW)
4274 can safely be reported as READ faults. */
4275 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4276 uErr |= X86_TRAP_PF_RW;
4277#else
4278 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4279 {
4280 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4281 uErr |= X86_TRAP_PF_RW;
4282 }
4283#endif
4284
4285 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4286 uErr, GCPtrWhere);
4287}
4288
4289
4290/** \#MF(0) - 10. */
4291DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4292{
4293 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4294}
4295
4296
4297/** \#AC(0) - 11. */
4298DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4299{
4300 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4301}
4302
4303
4304/**
4305 * Macro for calling iemCImplRaiseDivideError().
4306 *
4307 * This enables us to add/remove arguments and force different levels of
4308 * inlining as we wish.
4309 *
4310 * @return Strict VBox status code.
4311 */
4312#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4313IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4314{
4315 NOREF(cbInstr);
4316 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4317}
4318
4319
4320/**
4321 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4322 *
4323 * This enables us to add/remove arguments and force different levels of
4324 * inlining as we wish.
4325 *
4326 * @return Strict VBox status code.
4327 */
4328#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4329IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4330{
4331 NOREF(cbInstr);
4332 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4333}
4334
4335
4336/**
4337 * Macro for calling iemCImplRaiseInvalidOpcode().
4338 *
4339 * This enables us to add/remove arguments and force different levels of
4340 * inlining as we wish.
4341 *
4342 * @return Strict VBox status code.
4343 */
4344#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4345IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4346{
4347 NOREF(cbInstr);
4348 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4349}
4350
4351
4352/** @} */
4353
4354
4355/*
4356 *
4357 * Helper routines.
4358 * Helper routines.
4359 * Helper routines.
4360 *
4361 */
4362
4363/**
4364 * Recalculates the effective operand size.
4365 *
4366 * @param pIemCpu The IEM state.
4367 */
4368IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4369{
4370 switch (pIemCpu->enmCpuMode)
4371 {
4372 case IEMMODE_16BIT:
4373 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4374 break;
4375 case IEMMODE_32BIT:
4376 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4377 break;
4378 case IEMMODE_64BIT:
4379 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4380 {
4381 case 0:
4382 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4383 break;
4384 case IEM_OP_PRF_SIZE_OP:
4385 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4386 break;
4387 case IEM_OP_PRF_SIZE_REX_W:
4388 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4389 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4390 break;
4391 }
4392 break;
4393 default:
4394 AssertFailed();
4395 }
4396}
4397
4398
4399/**
4400 * Sets the default operand size to 64-bit and recalculates the effective
4401 * operand size.
4402 *
4403 * @param pIemCpu The IEM state.
4404 */
4405IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4406{
4407 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4408 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
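        /* With a 64-bit default only a lone 0x66 prefix drops to 16-bit; REX.W keeps 64-bit even when combined with 0x66,
           and there is no 32-bit choice for these instructions. */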
4409 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4410 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4411 else
4412 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4413}
4414
4415
4416/*
4417 *
4418 * Common opcode decoders.
4419 * Common opcode decoders.
4420 * Common opcode decoders.
4421 *
4422 */
4423//#include <iprt/mem.h>
4424
4425/**
4426 * Used to add extra details about a stub case.
4427 * @param pIemCpu The IEM per CPU state.
4428 */
4429IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4430{
4431#if defined(LOG_ENABLED) && defined(IN_RING3)
4432 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4433 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4434 char szRegs[4096];
4435 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4436 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4437 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4438 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4439 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4440 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4441 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4442 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4443 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4444 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4445 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4446 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4447 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4448 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4449 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4450 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4451 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4452 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4453 " efer=%016VR{efer}\n"
4454 " pat=%016VR{pat}\n"
4455 " sf_mask=%016VR{sf_mask}\n"
4456 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4457 " lstar=%016VR{lstar}\n"
4458 " star=%016VR{star} cstar=%016VR{cstar}\n"
4459 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4460 );
4461
4462 char szInstr[256];
4463 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4464 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4465 szInstr, sizeof(szInstr), NULL);
4466
4467 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4468#else
4469 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);
4470#endif
4471}
4472
4473/**
4474 * Complains about a stub.
4475 *
4476 * Providing two versions of this macro, one for daily use and one for use when
4477 * working on IEM.
4478 */
4479#if 0
4480# define IEMOP_BITCH_ABOUT_STUB() \
4481 do { \
4482 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4483 iemOpStubMsg2(pIemCpu); \
4484 RTAssertPanic(); \
4485 } while (0)
4486#else
4487# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4488#endif
4489
4490/** Stubs an opcode. */
4491#define FNIEMOP_STUB(a_Name) \
4492 FNIEMOP_DEF(a_Name) \
4493 { \
4494 IEMOP_BITCH_ABOUT_STUB(); \
4495 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4496 } \
4497 typedef int ignore_semicolon
4498
4499/** Stubs an opcode. */
4500#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4501 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4502 { \
4503 IEMOP_BITCH_ABOUT_STUB(); \
4504 NOREF(a_Name0); \
4505 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4506 } \
4507 typedef int ignore_semicolon
4508
4509/** Stubs an opcode which currently should raise \#UD. */
4510#define FNIEMOP_UD_STUB(a_Name) \
4511 FNIEMOP_DEF(a_Name) \
4512 { \
4513 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4514 return IEMOP_RAISE_INVALID_OPCODE(); \
4515 } \
4516 typedef int ignore_semicolon
4517
4518/** Stubs an opcode which currently should raise \#UD. */
4519#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4520 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4521 { \
4522 NOREF(a_Name0); \
4523 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4524 return IEMOP_RAISE_INVALID_OPCODE(); \
4525 } \
4526 typedef int ignore_semicolon
4527
4528
4529
4530/** @name Register Access.
4531 * @{
4532 */
4533
4534/**
4535 * Gets a reference (pointer) to the specified hidden segment register.
4536 *
4537 * @returns Hidden register reference.
4538 * @param pIemCpu The per CPU data.
4539 * @param iSegReg The segment register.
4540 */
4541IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4542{
4543 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4544 PCPUMSELREG pSReg;
4545 switch (iSegReg)
4546 {
4547 case X86_SREG_ES: pSReg = &pCtx->es; break;
4548 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4549 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4550 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4551 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4552 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4553 default:
4554 AssertFailedReturn(NULL);
4555 }
4556#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4557 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4558 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4559#else
4560 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4561#endif
4562 return pSReg;
4563}
4564
4565
4566/**
4567 * Gets a reference (pointer) to the specified segment register (the selector
4568 * value).
4569 *
4570 * @returns Pointer to the selector variable.
4571 * @param pIemCpu The per CPU data.
4572 * @param iSegReg The segment register.
4573 */
4574IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4575{
4576 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4577 switch (iSegReg)
4578 {
4579 case X86_SREG_ES: return &pCtx->es.Sel;
4580 case X86_SREG_CS: return &pCtx->cs.Sel;
4581 case X86_SREG_SS: return &pCtx->ss.Sel;
4582 case X86_SREG_DS: return &pCtx->ds.Sel;
4583 case X86_SREG_FS: return &pCtx->fs.Sel;
4584 case X86_SREG_GS: return &pCtx->gs.Sel;
4585 }
4586 AssertFailedReturn(NULL);
4587}
4588
4589
4590/**
4591 * Fetches the selector value of a segment register.
4592 *
4593 * @returns The selector value.
4594 * @param pIemCpu The per CPU data.
4595 * @param iSegReg The segment register.
4596 */
4597IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4598{
4599 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4600 switch (iSegReg)
4601 {
4602 case X86_SREG_ES: return pCtx->es.Sel;
4603 case X86_SREG_CS: return pCtx->cs.Sel;
4604 case X86_SREG_SS: return pCtx->ss.Sel;
4605 case X86_SREG_DS: return pCtx->ds.Sel;
4606 case X86_SREG_FS: return pCtx->fs.Sel;
4607 case X86_SREG_GS: return pCtx->gs.Sel;
4608 }
4609 AssertFailedReturn(0xffff);
4610}
4611
4612
4613/**
4614 * Gets a reference (pointer) to the specified general register.
4615 *
4616 * @returns Register reference.
4617 * @param pIemCpu The per CPU data.
4618 * @param iReg The general register.
4619 */
4620IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4621{
4622 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4623 switch (iReg)
4624 {
4625 case X86_GREG_xAX: return &pCtx->rax;
4626 case X86_GREG_xCX: return &pCtx->rcx;
4627 case X86_GREG_xDX: return &pCtx->rdx;
4628 case X86_GREG_xBX: return &pCtx->rbx;
4629 case X86_GREG_xSP: return &pCtx->rsp;
4630 case X86_GREG_xBP: return &pCtx->rbp;
4631 case X86_GREG_xSI: return &pCtx->rsi;
4632 case X86_GREG_xDI: return &pCtx->rdi;
4633 case X86_GREG_x8: return &pCtx->r8;
4634 case X86_GREG_x9: return &pCtx->r9;
4635 case X86_GREG_x10: return &pCtx->r10;
4636 case X86_GREG_x11: return &pCtx->r11;
4637 case X86_GREG_x12: return &pCtx->r12;
4638 case X86_GREG_x13: return &pCtx->r13;
4639 case X86_GREG_x14: return &pCtx->r14;
4640 case X86_GREG_x15: return &pCtx->r15;
4641 }
4642 AssertFailedReturn(NULL);
4643}
4644
4645
4646/**
4647 * Gets a reference (pointer) to the specified 8-bit general register.
4648 *
4649 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4650 *
4651 * @returns Register reference.
4652 * @param pIemCpu The per CPU data.
4653 * @param iReg The register.
4654 */
4655IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4656{
4657 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4658 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4659
4660 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4661 if (iReg >= 4)
4662 pu8Reg++;
4663 return pu8Reg;
4664}
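
/* Illustrative mapping, assuming the little-endian register layout of CPUMCTX:
   without a REX prefix, iReg 4..7 select AH/CH/DH/BH, i.e. byte 1 of
   rAX/rCX/rDX/rBX, so e.g. iemGRegRefU8(pIemCpu, 6) yields
   (uint8_t *)&pCtx->rdx + 1 (DH).  With any REX prefix present, 4..7 select
   SPL/BPL/SIL/DIL instead, i.e. byte 0 of rSP/rBP/rSI/rDI. */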
4665
4666
4667/**
4668 * Fetches the value of an 8-bit general register.
4669 *
4670 * @returns The register value.
4671 * @param pIemCpu The per CPU data.
4672 * @param iReg The register.
4673 */
4674IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4675{
4676 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4677 return *pbSrc;
4678}
4679
4680
4681/**
4682 * Fetches the value of a 16-bit general register.
4683 *
4684 * @returns The register value.
4685 * @param pIemCpu The per CPU data.
4686 * @param iReg The register.
4687 */
4688IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4689{
4690 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4691}
4692
4693
4694/**
4695 * Fetches the value of a 32-bit general register.
4696 *
4697 * @returns The register value.
4698 * @param pIemCpu The per CPU data.
4699 * @param iReg The register.
4700 */
4701IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4702{
4703 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4704}
4705
4706
4707/**
4708 * Fetches the value of a 64-bit general register.
4709 *
4710 * @returns The register value.
4711 * @param pIemCpu The per CPU data.
4712 * @param iReg The register.
4713 */
4714IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4715{
4716 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4717}
4718
4719
4720/**
4721 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4722 *
4723 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4724 * segment limit.
4725 *
 * @returns Strict VBox status code.
4726 * @param pIemCpu The per CPU data.
4727 * @param offNextInstr The offset of the next instruction.
4728 */
4729IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4730{
4731 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4732 switch (pIemCpu->enmEffOpSize)
4733 {
4734 case IEMMODE_16BIT:
4735 {
4736 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4737 if ( uNewIp > pCtx->cs.u32Limit
4738 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4739 return iemRaiseGeneralProtectionFault0(pIemCpu);
4740 pCtx->rip = uNewIp;
4741 break;
4742 }
4743
4744 case IEMMODE_32BIT:
4745 {
4746 Assert(pCtx->rip <= UINT32_MAX);
4747 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4748
4749 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4750 if (uNewEip > pCtx->cs.u32Limit)
4751 return iemRaiseGeneralProtectionFault0(pIemCpu);
4752 pCtx->rip = uNewEip;
4753 break;
4754 }
4755
4756 case IEMMODE_64BIT:
4757 {
4758 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4759
4760 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4761 if (!IEM_IS_CANONICAL(uNewRip))
4762 return iemRaiseGeneralProtectionFault0(pIemCpu);
4763 pCtx->rip = uNewRip;
4764 break;
4765 }
4766
4767 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4768 }
4769
4770 pCtx->eflags.Bits.u1RF = 0;
4771 return VINF_SUCCESS;
4772}
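
/* Worked example (illustrative): in 16-bit code with IP=0x1000 and a two byte
   'jmp short +0x10' (EB 10), offOpcode is 2 and offNextInstr is 0x10, so the
   new IP is 0x1000 + 2 + 0x10 = 0x1012.  If that value exceeds CS.u32Limit,
   iemRaiseGeneralProtectionFault0 is returned instead of updating RIP. */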
4773
4774
4775/**
4776 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4777 *
4778 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4779 * segment limit.
4780 *
4781 * @returns Strict VBox status code.
4782 * @param pIemCpu The per CPU data.
4783 * @param offNextInstr The offset of the next instruction.
4784 */
4785IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4786{
4787 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4788 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4789
4790 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4791 if ( uNewIp > pCtx->cs.u32Limit
4792 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4793 return iemRaiseGeneralProtectionFault0(pIemCpu);
4794    /** @todo Test 16-bit jump in 64-bit mode. Is it possible? */
4795 pCtx->rip = uNewIp;
4796 pCtx->eflags.Bits.u1RF = 0;
4797
4798 return VINF_SUCCESS;
4799}
4800
4801
4802/**
4803 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4804 *
4805 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4806 * segment limit.
4807 *
4808 * @returns Strict VBox status code.
4809 * @param pIemCpu The per CPU data.
4810 * @param offNextInstr The offset of the next instruction.
4811 */
4812IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4813{
4814 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4815 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4816
4817 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4818 {
4819 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4820
4821 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4822 if (uNewEip > pCtx->cs.u32Limit)
4823 return iemRaiseGeneralProtectionFault0(pIemCpu);
4824 pCtx->rip = uNewEip;
4825 }
4826 else
4827 {
4828 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4829
4830 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4831 if (!IEM_IS_CANONICAL(uNewRip))
4832 return iemRaiseGeneralProtectionFault0(pIemCpu);
4833 pCtx->rip = uNewRip;
4834 }
4835 pCtx->eflags.Bits.u1RF = 0;
4836 return VINF_SUCCESS;
4837}
4838
4839
4840/**
4841 * Performs a near jump to the specified address.
4842 *
4843 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4844 * segment limit.
4845 *
 * @returns Strict VBox status code.
4846 * @param pIemCpu The per CPU data.
4847 * @param uNewRip The new RIP value.
4848 */
4849IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4850{
4851 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4852 switch (pIemCpu->enmEffOpSize)
4853 {
4854 case IEMMODE_16BIT:
4855 {
4856 Assert(uNewRip <= UINT16_MAX);
4857 if ( uNewRip > pCtx->cs.u32Limit
4858 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4859 return iemRaiseGeneralProtectionFault0(pIemCpu);
4860 /** @todo Test 16-bit jump in 64-bit mode. */
4861 pCtx->rip = uNewRip;
4862 break;
4863 }
4864
4865 case IEMMODE_32BIT:
4866 {
4867 Assert(uNewRip <= UINT32_MAX);
4868 Assert(pCtx->rip <= UINT32_MAX);
4869 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4870
4871 if (uNewRip > pCtx->cs.u32Limit)
4872 return iemRaiseGeneralProtectionFault0(pIemCpu);
4873 pCtx->rip = uNewRip;
4874 break;
4875 }
4876
4877 case IEMMODE_64BIT:
4878 {
4879 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4880
4881 if (!IEM_IS_CANONICAL(uNewRip))
4882 return iemRaiseGeneralProtectionFault0(pIemCpu);
4883 pCtx->rip = uNewRip;
4884 break;
4885 }
4886
4887 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4888 }
4889
4890 pCtx->eflags.Bits.u1RF = 0;
4891 return VINF_SUCCESS;
4892}
4893
4894
4895/**
4896 * Gets the address of the top of the stack.
4897 *
 * @returns The current top of stack address (SP/ESP/RSP as appropriate).
4898 * @param   pIemCpu             The per CPU data.
4899 * @param   pCtx                The CPU context from which SP/ESP/RSP should be
4900 *                              read.
4901 */
4902DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4903{
4904 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4905 return pCtx->rsp;
4906 if (pCtx->ss.Attr.n.u1DefBig)
4907 return pCtx->esp;
4908 return pCtx->sp;
4909}
4910
4911
4912/**
4913 * Updates the RIP/EIP/IP to point to the next instruction.
4914 *
4915 * This function leaves the EFLAGS.RF flag alone.
4916 *
4917 * @param pIemCpu The per CPU data.
4918 * @param cbInstr The number of bytes to add.
4919 */
4920IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4921{
4922 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4923 switch (pIemCpu->enmCpuMode)
4924 {
4925 case IEMMODE_16BIT:
4926 Assert(pCtx->rip <= UINT16_MAX);
4927 pCtx->eip += cbInstr;
4928 pCtx->eip &= UINT32_C(0xffff);
4929 break;
4930
4931 case IEMMODE_32BIT:
4932 pCtx->eip += cbInstr;
4933 Assert(pCtx->rip <= UINT32_MAX);
4934 break;
4935
4936 case IEMMODE_64BIT:
4937 pCtx->rip += cbInstr;
4938 break;
4939 default: AssertFailed();
4940 }
4941}
4942
4943
4944#if 0
4945/**
4946 * Updates the RIP/EIP/IP to point to the next instruction.
4947 *
4948 * @param pIemCpu The per CPU data.
4949 */
4950IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4951{
4952 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4953}
4954#endif
4955
4956
4957
4958/**
4959 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4960 *
4961 * @param pIemCpu The per CPU data.
4962 * @param cbInstr The number of bytes to add.
4963 */
4964IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4965{
4966 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4967
4968 pCtx->eflags.Bits.u1RF = 0;
4969
4970 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4971 switch (pIemCpu->enmCpuMode)
4972 {
4973 /** @todo investigate if EIP or RIP is really incremented. */
4974 case IEMMODE_16BIT:
4975 case IEMMODE_32BIT:
4976 pCtx->eip += cbInstr;
4977 Assert(pCtx->rip <= UINT32_MAX);
4978 break;
4979
4980 case IEMMODE_64BIT:
4981 pCtx->rip += cbInstr;
4982 break;
4983 default: AssertFailed();
4984 }
4985}
4986
4987
4988/**
4989 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4990 *
4991 * @param pIemCpu The per CPU data.
4992 */
4993IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4994{
4995 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4996}
4997
4998
4999/**
5000 * Adds to the stack pointer.
5001 *
5002 * @param pIemCpu The per CPU data.
5003 * @param   pCtx                The CPU context whose SP/ESP/RSP should be
5004 *                              updated.
5005 * @param cbToAdd The number of bytes to add.
5006 */
5007DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
5008{
5009 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5010 pCtx->rsp += cbToAdd;
5011 else if (pCtx->ss.Attr.n.u1DefBig)
5012 pCtx->esp += cbToAdd;
5013 else
5014 pCtx->sp += cbToAdd;
5015}
5016
5017
5018/**
5019 * Subtracts from the stack pointer.
5020 *
5021 * @param pIemCpu The per CPU data.
5022 * @param   pCtx                The CPU context whose SP/ESP/RSP should be
5023 *                              updated.
5024 * @param cbToSub The number of bytes to subtract.
5025 */
5026DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
5027{
5028 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5029 pCtx->rsp -= cbToSub;
5030 else if (pCtx->ss.Attr.n.u1DefBig)
5031 pCtx->esp -= cbToSub;
5032 else
5033 pCtx->sp -= cbToSub;
5034}
5035
5036
5037/**
5038 * Adds to the temporary stack pointer.
5039 *
5040 * @param pIemCpu The per CPU data.
5041 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5042 * @param cbToAdd The number of bytes to add.
5043 * @param pCtx Where to get the current stack mode.
5044 */
5045DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
5046{
5047 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5048 pTmpRsp->u += cbToAdd;
5049 else if (pCtx->ss.Attr.n.u1DefBig)
5050 pTmpRsp->DWords.dw0 += cbToAdd;
5051 else
5052 pTmpRsp->Words.w0 += cbToAdd;
5053}
5054
5055
5056/**
5057 * Subtracts from the temporary stack pointer.
5058 *
5059 * @param pIemCpu The per CPU data.
5060 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5061 * @param cbToSub The number of bytes to subtract.
5062 * @param pCtx Where to get the current stack mode.
5063 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
5064 *          expecting that.
5065 */
5066DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5067{
5068 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5069 pTmpRsp->u -= cbToSub;
5070 else if (pCtx->ss.Attr.n.u1DefBig)
5071 pTmpRsp->DWords.dw0 -= cbToSub;
5072 else
5073 pTmpRsp->Words.w0 -= cbToSub;
5074}
5075
5076
5077/**
5078 * Calculates the effective stack address for a push of the specified size as
5079 * well as the new RSP value (upper bits may be masked).
5080 *
5081 * @returns Effective stack address for the push.
5082 * @param pIemCpu The IEM per CPU data.
5083 * @param pCtx Where to get the current stack mode.
5084 * @param   cbItem              The size of the stack item to push.
5085 * @param puNewRsp Where to return the new RSP value.
5086 */
5087DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5088{
5089 RTUINT64U uTmpRsp;
5090 RTGCPTR GCPtrTop;
5091 uTmpRsp.u = pCtx->rsp;
5092
5093 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5094 GCPtrTop = uTmpRsp.u -= cbItem;
5095 else if (pCtx->ss.Attr.n.u1DefBig)
5096 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5097 else
5098 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5099 *puNewRsp = uTmpRsp.u;
5100 return GCPtrTop;
5101}
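
/* Worked example (illustrative): with a 16-bit stack (SS.Attr.u1DefBig clear,
   not in 64-bit mode), rsp=0x00000002 and cbItem=4, only Words.w0 is updated:
   it wraps around to 0xFFFE, which is both the returned GCPtrTop and the low
   word of *puNewRsp; the upper bits of the original RSP are left unchanged. */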
5102
5103
5104/**
5105 * Gets the current stack pointer and calculates the value after a pop of the
5106 * specified size.
5107 *
5108 * @returns Current stack pointer.
5109 * @param pIemCpu The per CPU data.
5110 * @param pCtx Where to get the current stack mode.
5111 * @param cbItem The size of the stack item to pop.
5112 * @param puNewRsp Where to return the new RSP value.
5113 */
5114DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5115{
5116 RTUINT64U uTmpRsp;
5117 RTGCPTR GCPtrTop;
5118 uTmpRsp.u = pCtx->rsp;
5119
5120 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5121 {
5122 GCPtrTop = uTmpRsp.u;
5123 uTmpRsp.u += cbItem;
5124 }
5125 else if (pCtx->ss.Attr.n.u1DefBig)
5126 {
5127 GCPtrTop = uTmpRsp.DWords.dw0;
5128 uTmpRsp.DWords.dw0 += cbItem;
5129 }
5130 else
5131 {
5132 GCPtrTop = uTmpRsp.Words.w0;
5133 uTmpRsp.Words.w0 += cbItem;
5134 }
5135 *puNewRsp = uTmpRsp.u;
5136 return GCPtrTop;
5137}
5138
5139
5140/**
5141 * Calculates the effective stack address for a push of the specified size as
5142 * well as the new temporary RSP value (upper bits may be masked).
5143 *
5144 * @returns Effective stack address for the push.
5145 * @param pIemCpu The per CPU data.
5146 * @param pCtx Where to get the current stack mode.
5147 * @param pTmpRsp The temporary stack pointer. This is updated.
5148 * @param   cbItem              The size of the stack item to push.
5149 */
5150DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5151{
5152 RTGCPTR GCPtrTop;
5153
5154 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5155 GCPtrTop = pTmpRsp->u -= cbItem;
5156 else if (pCtx->ss.Attr.n.u1DefBig)
5157 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5158 else
5159 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5160 return GCPtrTop;
5161}
5162
5163
5164/**
5165 * Gets the effective stack address for a pop of the specified size and
5166 * calculates and updates the temporary RSP.
5167 *
5168 * @returns Current stack pointer.
5169 * @param pIemCpu The per CPU data.
5170 * @param pCtx Where to get the current stack mode.
5171 * @param pTmpRsp The temporary stack pointer. This is updated.
5172 * @param cbItem The size of the stack item to pop.
5173 */
5174DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5175{
5176 RTGCPTR GCPtrTop;
5177 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5178 {
5179 GCPtrTop = pTmpRsp->u;
5180 pTmpRsp->u += cbItem;
5181 }
5182 else if (pCtx->ss.Attr.n.u1DefBig)
5183 {
5184 GCPtrTop = pTmpRsp->DWords.dw0;
5185 pTmpRsp->DWords.dw0 += cbItem;
5186 }
5187 else
5188 {
5189 GCPtrTop = pTmpRsp->Words.w0;
5190 pTmpRsp->Words.w0 += cbItem;
5191 }
5192 return GCPtrTop;
5193}
5194
5195/** @} */
5196
5197
5198/** @name FPU access and helpers.
5199 *
5200 * @{
5201 */
5202
5203
5204/**
5205 * Hook for preparing to use the host FPU.
5206 *
5207 * This is necessary in ring-0 and raw-mode context.
5208 *
5209 * @param pIemCpu The IEM per CPU data.
5210 */
5211DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5212{
5213#ifdef IN_RING3
5214 NOREF(pIemCpu);
5215#else
5216/** @todo RZ: FIXME */
5217//# error "Implement me"
5218#endif
5219}
5220
5221
5222/**
5223 * Hook for preparing to use the host FPU for SSE instructions.
5224 *
5225 * This is necessary in ring-0 and raw-mode context.
5226 *
5227 * @param pIemCpu The IEM per CPU data.
5228 */
5229DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5230{
5231 iemFpuPrepareUsage(pIemCpu);
5232}
5233
5234
5235/**
5236 * Stores a QNaN value into a FPU register.
5237 *
5238 * @param pReg Pointer to the register.
5239 */
5240DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5241{
5242 pReg->au32[0] = UINT32_C(0x00000000);
5243 pReg->au32[1] = UINT32_C(0xc0000000);
5244 pReg->au16[4] = UINT16_C(0xffff);
5245}
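
/* The pattern stored above is the x87 "real indefinite" QNaN: sign=1,
   exponent=0x7fff (au16[4]=0xffff), integer bit and top fraction bit set
   (au32[1]=0xc0000000), remaining fraction bits zero - i.e. the 80-bit value
   0xffff'c0000000'00000000. */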
5246
5247
5248/**
5249 * Updates the FOP, FPU.CS and FPUIP registers.
5250 *
5251 * @param pIemCpu The IEM per CPU data.
5252 * @param pCtx The CPU context.
5253 * @param pFpuCtx The FPU context.
5254 */
5255DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5256{
5257 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5258 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5259    /** @todo x87.CS and FPUIP need to be kept separately. */
5260 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5261 {
5262 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5263 * happens in real mode here based on the fnsave and fnstenv images. */
5264 pFpuCtx->CS = 0;
5265 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5266 }
5267 else
5268 {
5269 pFpuCtx->CS = pCtx->cs.Sel;
5270 pFpuCtx->FPUIP = pCtx->rip;
5271 }
5272}
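
/* Illustrative example, assuming offFpuOpcode indexes the byte following the
   D8..DF escape: for FPTAN (bytes D9 F2) the FOP becomes
   0xF2 | ((0xD9 & 0x7) << 8) = 0x1F2, i.e. the conventional 11-bit x87 opcode.
   In real/V86 mode the code above stores FPUIP as eip | (CS << 4) and zeroes
   the CS field, mirroring the fnsave/fnstenv real-mode image layout. */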
5273
5274
5275/**
5276 * Updates the x87.DS and FPUDP registers.
5277 *
5278 * @param pIemCpu The IEM per CPU data.
5279 * @param pCtx The CPU context.
5280 * @param pFpuCtx The FPU context.
5281 * @param iEffSeg The effective segment register.
5282 * @param GCPtrEff The effective address relative to @a iEffSeg.
5283 */
5284DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5285{
5286 RTSEL sel;
5287 switch (iEffSeg)
5288 {
5289 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5290 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5291 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5292 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5293 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5294 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5295 default:
5296 AssertMsgFailed(("%d\n", iEffSeg));
5297 sel = pCtx->ds.Sel;
5298 }
5299    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5300 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5301 {
5302 pFpuCtx->DS = 0;
5303 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5304 }
5305 else
5306 {
5307 pFpuCtx->DS = sel;
5308 pFpuCtx->FPUDP = GCPtrEff;
5309 }
5310}
5311
5312
5313/**
5314 * Rotates the stack registers in the push direction.
5315 *
5316 * @param pFpuCtx The FPU context.
5317 * @remarks This is a complete waste of time, but fxsave stores the registers in
5318 * stack order.
5319 */
5320DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5321{
5322 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5323 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5324 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5325 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5326 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5327 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5328 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5329 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5330 pFpuCtx->aRegs[0].r80 = r80Tmp;
5331}
5332
5333
5334/**
5335 * Rotates the stack registers in the pop direction.
5336 *
5337 * @param pFpuCtx The FPU context.
5338 * @remarks This is a complete waste of time, but fxsave stores the registers in
5339 * stack order.
5340 */
5341DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5342{
5343 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5344 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5345 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5346 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5347 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5348 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5349 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5350 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5351 pFpuCtx->aRegs[7].r80 = r80Tmp;
5352}
5353
5354
5355/**
5356 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5357 * exception prevents it.
5358 *
5359 * @param pIemCpu The IEM per CPU data.
5360 * @param pResult The FPU operation result to push.
5361 * @param pFpuCtx The FPU context.
5362 */
5363IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5364{
5365 /* Update FSW and bail if there are pending exceptions afterwards. */
5366 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5367 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5368 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5369 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5370 {
5371 pFpuCtx->FSW = fFsw;
5372 return;
5373 }
5374
5375 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5376 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5377 {
5378 /* All is fine, push the actual value. */
5379 pFpuCtx->FTW |= RT_BIT(iNewTop);
5380 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5381 }
5382 else if (pFpuCtx->FCW & X86_FCW_IM)
5383 {
5384 /* Masked stack overflow, push QNaN. */
5385 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5386 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5387 }
5388 else
5389 {
5390 /* Raise stack overflow, don't push anything. */
5391 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5392 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5393 return;
5394 }
5395
5396 fFsw &= ~X86_FSW_TOP_MASK;
5397 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5398 pFpuCtx->FSW = fFsw;
5399
5400 iemFpuRotateStackPush(pFpuCtx);
5401}
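
/* Note on the TOP arithmetic above: iNewTop = (TOP + 7) & 7 is TOP - 1 modulo
   8, i.e. the slot the push will occupy.  E.g. with TOP=0 the new top is
   register 7; if FTW bit 7 is already set the push overflows, and either a
   QNaN is pushed (FCW.IM set) or #IS is flagged without touching the stack. */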
5402
5403
5404/**
5405 * Stores a result in a FPU register and updates the FSW and FTW.
5406 *
5407 * @param pFpuCtx The FPU context.
5408 * @param pResult The result to store.
5409 * @param iStReg Which FPU register to store it in.
5410 */
5411IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5412{
5413 Assert(iStReg < 8);
5414 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5415 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5416 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5417 pFpuCtx->FTW |= RT_BIT(iReg);
5418 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5419}
5420
5421
5422/**
5423 * Only updates the FPU status word (FSW) with the result of the current
5424 * instruction.
5425 *
5426 * @param pFpuCtx The FPU context.
5427 * @param u16FSW The FSW output of the current instruction.
5428 */
5429IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5430{
5431 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5432 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5433}
5434
5435
5436/**
5437 * Pops one item off the FPU stack if no pending exception prevents it.
5438 *
5439 * @param pFpuCtx The FPU context.
5440 */
5441IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5442{
5443 /* Check pending exceptions. */
5444 uint16_t uFSW = pFpuCtx->FSW;
5445 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5446 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5447 return;
5448
5449 /* TOP--. */
5450 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5451 uFSW &= ~X86_FSW_TOP_MASK;
5452 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5453 pFpuCtx->FSW = uFSW;
5454
5455 /* Mark the previous ST0 as empty. */
5456 iOldTop >>= X86_FSW_TOP_SHIFT;
5457 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5458
5459 /* Rotate the registers. */
5460 iemFpuRotateStackPop(pFpuCtx);
5461}
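
/* Note on the TOP update above: adding 9 in the TOP field is congruent to
   adding 1 modulo 8 once masked, so this is simply TOP++ with wrap-around,
   e.g. TOP=7 becomes TOP=0. */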
5462
5463
5464/**
5465 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5466 *
5467 * @param pIemCpu The IEM per CPU data.
5468 * @param pResult The FPU operation result to push.
5469 */
5470IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5471{
5472 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5473 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5474 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5475 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5476}
5477
5478
5479/**
5480 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5481 * and sets FPUDP and FPUDS.
5482 *
5483 * @param pIemCpu The IEM per CPU data.
5484 * @param pResult The FPU operation result to push.
5485 * @param iEffSeg The effective segment register.
5486 * @param GCPtrEff The effective address relative to @a iEffSeg.
5487 */
5488IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5489{
5490 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5491 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5492 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5493 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5494 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5495}
5496
5497
5498 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5499 * unless a pending exception prevents it.
5500 * unless a pending exception prevents it.
5501 *
5502 * @param pIemCpu The IEM per CPU data.
5503 * @param pResult The FPU operation result to store and push.
5504 */
5505IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5506{
5507 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5508 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5509 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5510
5511 /* Update FSW and bail if there are pending exceptions afterwards. */
5512 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5513 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5514 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5515 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5516 {
5517 pFpuCtx->FSW = fFsw;
5518 return;
5519 }
5520
5521 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5522 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5523 {
5524 /* All is fine, push the actual value. */
5525 pFpuCtx->FTW |= RT_BIT(iNewTop);
5526 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5527 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5528 }
5529 else if (pFpuCtx->FCW & X86_FCW_IM)
5530 {
5531 /* Masked stack overflow, push QNaN. */
5532 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5533 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5534 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5535 }
5536 else
5537 {
5538 /* Raise stack overflow, don't push anything. */
5539 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5540 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5541 return;
5542 }
5543
5544 fFsw &= ~X86_FSW_TOP_MASK;
5545 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5546 pFpuCtx->FSW = fFsw;
5547
5548 iemFpuRotateStackPush(pFpuCtx);
5549}
5550
5551
5552/**
5553 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5554 * FOP.
5555 *
5556 * @param pIemCpu The IEM per CPU data.
5557 * @param pResult The result to store.
5558 * @param iStReg Which FPU register to store it in.
5559 */
5560IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5561{
5562 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5563 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5564 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5565 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5566}
5567
5568
5569/**
5570 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5571 * FOP, and then pops the stack.
5572 *
5573 * @param pIemCpu The IEM per CPU data.
5574 * @param pResult The result to store.
5575 * @param iStReg Which FPU register to store it in.
5576 */
5577IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5578{
5579 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5580 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5581 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5582 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5583 iemFpuMaybePopOne(pFpuCtx);
5584}
5585
5586
5587/**
5588 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5589 * FPUDP, and FPUDS.
5590 *
5591 * @param pIemCpu The IEM per CPU data.
5592 * @param pResult The result to store.
5593 * @param iStReg Which FPU register to store it in.
5594 * @param iEffSeg The effective memory operand selector register.
5595 * @param GCPtrEff The effective memory operand offset.
5596 */
5597IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5598 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5599{
5600 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5601 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5602 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5603 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5604 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5605}
5606
5607
5608/**
5609 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5610 * FPUDP, and FPUDS, and then pops the stack.
5611 *
5612 * @param pIemCpu The IEM per CPU data.
5613 * @param pResult The result to store.
5614 * @param iStReg Which FPU register to store it in.
5615 * @param iEffSeg The effective memory operand selector register.
5616 * @param GCPtrEff The effective memory operand offset.
5617 */
5618IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5619 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5620{
5621 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5622 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5623 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5624 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5625 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5626 iemFpuMaybePopOne(pFpuCtx);
5627}
5628
5629
5630/**
5631 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5632 *
5633 * @param pIemCpu The IEM per CPU data.
5634 */
5635IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5636{
5637 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5638 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5639 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5640}
5641
5642
5643/**
5644 * Marks the specified stack register as free (for FFREE).
5645 *
5646 * @param pIemCpu The IEM per CPU data.
5647 * @param iStReg The register to free.
5648 */
5649IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5650{
5651 Assert(iStReg < 8);
5652 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5653 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5654 pFpuCtx->FTW &= ~RT_BIT(iReg);
5655}
5656
5657
5658/**
5659 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5660 *
5661 * @param pIemCpu The IEM per CPU data.
5662 */
5663IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5664{
5665 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5666 uint16_t uFsw = pFpuCtx->FSW;
5667 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5668 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5669 uFsw &= ~X86_FSW_TOP_MASK;
5670 uFsw |= uTop;
5671 pFpuCtx->FSW = uFsw;
5672}
5673
5674
5675/**
5676 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5677 *
5678 * @param pIemCpu The IEM per CPU data.
5679 */
5680IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5681{
5682 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5683 uint16_t uFsw = pFpuCtx->FSW;
5684 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5685 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5686 uFsw &= ~X86_FSW_TOP_MASK;
5687 uFsw |= uTop;
5688 pFpuCtx->FSW = uFsw;
5689}
5690
5691
5692/**
5693 * Updates the FSW, FOP, FPUIP, and FPUCS.
5694 *
5695 * @param pIemCpu The IEM per CPU data.
5696 * @param u16FSW The FSW from the current instruction.
5697 */
5698IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5699{
5700 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5701 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5702 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5703 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5704}
5705
5706
5707/**
5708 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5709 *
5710 * @param pIemCpu The IEM per CPU data.
5711 * @param u16FSW The FSW from the current instruction.
5712 */
5713IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5714{
5715 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5716 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5717 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5718 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5719 iemFpuMaybePopOne(pFpuCtx);
5720}
5721
5722
5723/**
5724 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5725 *
5726 * @param pIemCpu The IEM per CPU data.
5727 * @param u16FSW The FSW from the current instruction.
5728 * @param iEffSeg The effective memory operand selector register.
5729 * @param GCPtrEff The effective memory operand offset.
5730 */
5731IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5732{
5733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5734 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5735 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5736 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5737 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5738}
5739
5740
5741/**
5742 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5743 *
5744 * @param pIemCpu The IEM per CPU data.
5745 * @param u16FSW The FSW from the current instruction.
5746 */
5747IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5748{
5749 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5750 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5751 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5752 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5753 iemFpuMaybePopOne(pFpuCtx);
5754 iemFpuMaybePopOne(pFpuCtx);
5755}
5756
5757
5758/**
5759 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5760 *
5761 * @param pIemCpu The IEM per CPU data.
5762 * @param u16FSW The FSW from the current instruction.
5763 * @param iEffSeg The effective memory operand selector register.
5764 * @param GCPtrEff The effective memory operand offset.
5765 */
5766IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5767{
5768 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5769 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5770 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5771 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5772 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5773 iemFpuMaybePopOne(pFpuCtx);
5774}
5775
5776
5777/**
5778 * Worker routine for raising an FPU stack underflow exception.
5779 *
5780 * @param pIemCpu The IEM per CPU data.
5781 * @param pFpuCtx The FPU context.
5782 * @param iStReg The stack register being accessed.
5783 */
5784IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5785{
5786 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5787 if (pFpuCtx->FCW & X86_FCW_IM)
5788 {
5789 /* Masked underflow. */
5790 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5791 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5792 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5793 if (iStReg != UINT8_MAX)
5794 {
5795 pFpuCtx->FTW |= RT_BIT(iReg);
5796 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5797 }
5798 }
5799 else
5800 {
5801 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5802 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5803 }
5804}
5805
5806
5807/**
5808 * Raises a FPU stack underflow exception.
5809 *
5810 * @param pIemCpu The IEM per CPU data.
5811 * @param iStReg The destination register that should be loaded
5812 * with QNaN if \#IS is not masked. Specify
5813 * UINT8_MAX if none (like for fcom).
5814 */
5815DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5816{
5817 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5818 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5819 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5820 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5821}
5822
5823
5824DECL_NO_INLINE(IEM_STATIC, void)
5825iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5826{
5827 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5828 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5829 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5830 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5831 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5832}
5833
5834
5835DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5836{
5837 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5838 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5839 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5840 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5841 iemFpuMaybePopOne(pFpuCtx);
5842}
5843
5844
5845DECL_NO_INLINE(IEM_STATIC, void)
5846iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5847{
5848 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5849 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5850 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5851 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5852 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5853 iemFpuMaybePopOne(pFpuCtx);
5854}
5855
5856
5857DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5858{
5859 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5860 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5861 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5862 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5863 iemFpuMaybePopOne(pFpuCtx);
5864 iemFpuMaybePopOne(pFpuCtx);
5865}
5866
5867
5868DECL_NO_INLINE(IEM_STATIC, void)
5869iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5870{
5871 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5872 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5873 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5874
5875 if (pFpuCtx->FCW & X86_FCW_IM)
5876 {
5877        /* Masked underflow - push QNaN. */
5878 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5879 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5880 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5881 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5882 pFpuCtx->FTW |= RT_BIT(iNewTop);
5883 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5884 iemFpuRotateStackPush(pFpuCtx);
5885 }
5886 else
5887 {
5888 /* Exception pending - don't change TOP or the register stack. */
5889 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5890 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5891 }
5892}
5893
5894
5895DECL_NO_INLINE(IEM_STATIC, void)
5896iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5897{
5898 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5899 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5900 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5901
5902 if (pFpuCtx->FCW & X86_FCW_IM)
5903 {
5904        /* Masked underflow - push QNaN. */
5905 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5906 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5907 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5908 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5909 pFpuCtx->FTW |= RT_BIT(iNewTop);
5910 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5911 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5912 iemFpuRotateStackPush(pFpuCtx);
5913 }
5914 else
5915 {
5916 /* Exception pending - don't change TOP or the register stack. */
5917 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5918 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5919 }
5920}
5921
5922
5923/**
5924 * Worker routine for raising an FPU stack overflow exception on a push.
5925 *
5926 * @param pFpuCtx The FPU context.
5927 */
5928IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5929{
5930 if (pFpuCtx->FCW & X86_FCW_IM)
5931 {
5932 /* Masked overflow. */
5933 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5934 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5935 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5936 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5937 pFpuCtx->FTW |= RT_BIT(iNewTop);
5938 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5939 iemFpuRotateStackPush(pFpuCtx);
5940 }
5941 else
5942 {
5943 /* Exception pending - don't change TOP or the register stack. */
5944 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5945 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5946 }
5947}
5948
5949
5950/**
5951 * Raises a FPU stack overflow exception on a push.
5952 *
5953 * @param pIemCpu The IEM per CPU data.
5954 */
5955DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5956{
5957 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5958 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5959 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5960 iemFpuStackPushOverflowOnly(pFpuCtx);
5961}
5962
5963
5964/**
5965 * Raises a FPU stack overflow exception on a push with a memory operand.
5966 *
5967 * @param pIemCpu The IEM per CPU data.
5968 * @param iEffSeg The effective memory operand selector register.
5969 * @param GCPtrEff The effective memory operand offset.
5970 */
5971DECL_NO_INLINE(IEM_STATIC, void)
5972iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5973{
5974 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5975 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5976 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5977 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5978 iemFpuStackPushOverflowOnly(pFpuCtx);
5979}
5980
5981
5982IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5983{
5984 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5985 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5986 if (pFpuCtx->FTW & RT_BIT(iReg))
5987 return VINF_SUCCESS;
5988 return VERR_NOT_FOUND;
5989}
5990
5991
5992IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5993{
5994 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5995 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5996 if (pFpuCtx->FTW & RT_BIT(iReg))
5997 {
5998 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5999 return VINF_SUCCESS;
6000 }
6001 return VERR_NOT_FOUND;
6002}
6003
6004
6005IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
6006 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
6007{
6008 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6009 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6010 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6011 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6012 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6013 {
6014 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6015 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
6016 return VINF_SUCCESS;
6017 }
6018 return VERR_NOT_FOUND;
6019}
6020
6021
6022IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
6023{
6024 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6025 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6026 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6027 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6028 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6029 {
6030 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6031 return VINF_SUCCESS;
6032 }
6033 return VERR_NOT_FOUND;
6034}
6035
6036
6037/**
6038 * Updates the FPU exception status after FCW is changed.
6039 *
6040 * @param pFpuCtx The FPU context.
6041 */
6042IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
6043{
6044 uint16_t u16Fsw = pFpuCtx->FSW;
6045 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
6046 u16Fsw |= X86_FSW_ES | X86_FSW_B;
6047 else
6048 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
6049 pFpuCtx->FSW = u16Fsw;
6050}
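
/* Example (illustrative): if FSW.IE is set while FCW.IM is clear, the IE
   exception is unmasked, so the summary bits ES and B are raised; once all
   pending exceptions are masked again the same call clears them. */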
6051
6052
6053/**
6054 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6055 *
6056 * @returns The full FTW.
6057 * @param pFpuCtx The FPU context.
6058 */
6059IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6060{
6061 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6062 uint16_t u16Ftw = 0;
6063 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6064 for (unsigned iSt = 0; iSt < 8; iSt++)
6065 {
6066 unsigned const iReg = (iSt + iTop) & 7;
6067 if (!(u8Ftw & RT_BIT(iReg)))
6068 u16Ftw |= 3 << (iReg * 2); /* empty */
6069 else
6070 {
6071 uint16_t uTag;
6072 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6073 if (pr80Reg->s.uExponent == 0x7fff)
6074 uTag = 2; /* Exponent is all 1's => Special. */
6075 else if (pr80Reg->s.uExponent == 0x0000)
6076 {
6077 if (pr80Reg->s.u64Mantissa == 0x0000)
6078 uTag = 1; /* All bits are zero => Zero. */
6079 else
6080 uTag = 2; /* Must be special. */
6081 }
6082 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6083 uTag = 0; /* Valid. */
6084 else
6085 uTag = 2; /* Must be special. */
6086
6087            u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
6088 }
6089 }
6090
6091 return u16Ftw;
6092}
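
/* Tag encoding produced above (two bits per register): 00=valid, 01=zero,
   10=special (NaN/infinity/denormal), 11=empty.  Illustrative example: with
   TOP=6 and only ST(0) holding a normal value, physical register 6 gets tag 00
   and all others 11, giving a full FTW of 0xcfff. */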
6093
6094
6095/**
6096 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6097 *
6098 * @returns The compressed FTW.
6099 * @param u16FullFtw The full FTW to convert.
6100 */
6101IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6102{
6103 uint8_t u8Ftw = 0;
6104 for (unsigned i = 0; i < 8; i++)
6105 {
6106 if ((u16FullFtw & 3) != 3 /*empty*/)
6107 u8Ftw |= RT_BIT(i);
6108 u16FullFtw >>= 2;
6109 }
6110
6111 return u8Ftw;
6112}
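
/* Illustrative example: the full FTW 0xcfff (only physical register 6
   non-empty) compresses to 0x40, since each 2-bit tag other than 11 (empty)
   becomes a single 1 bit at the corresponding position. */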
6113
6114/** @} */
6115
6116
6117/** @name Memory access.
6118 *
6119 * @{
6120 */
6121
6122
6123/**
6124 * Updates the IEMCPU::cbWritten counter if applicable.
6125 *
6126 * @param pIemCpu The IEM per CPU data.
6127 * @param fAccess The access being accounted for.
6128 * @param cbMem The access size.
6129 */
6130DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6131{
6132 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6133 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6134 pIemCpu->cbWritten += (uint32_t)cbMem;
6135}
6136
6137
6138/**
6139 * Checks if the given segment can be written to, raising the appropriate
6140 * exception if not.
6141 *
6142 * @returns VBox strict status code.
6143 *
6144 * @param pIemCpu The IEM per CPU data.
6145 * @param pHid Pointer to the hidden register.
6146 * @param iSegReg The register number.
6147 * @param pu64BaseAddr Where to return the base address to use for the
6148 * segment. (In 64-bit code it may differ from the
6149 * base in the hidden segment.)
6150 */
6151IEM_STATIC VBOXSTRICTRC
6152iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6153{
6154 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6155 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6156 else
6157 {
6158 if (!pHid->Attr.n.u1Present)
6159 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6160
6161 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6162 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6163 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6164 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6165 *pu64BaseAddr = pHid->u64Base;
6166 }
6167 return VINF_SUCCESS;
6168}
6169
6170
6171/**
6172 * Checks if the given segment can be read from, raising the appropriate
6173 * exception if not.
6174 *
6175 * @returns VBox strict status code.
6176 *
6177 * @param pIemCpu The IEM per CPU data.
6178 * @param pHid Pointer to the hidden register.
6179 * @param iSegReg The register number.
6180 * @param pu64BaseAddr Where to return the base address to use for the
6181 * segment. (In 64-bit code it may differ from the
6182 * base in the hidden segment.)
6183 */
6184IEM_STATIC VBOXSTRICTRC
6185iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6186{
6187 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6188 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6189 else
6190 {
6191 if (!pHid->Attr.n.u1Present)
6192 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6193
6194 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6195 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6196 *pu64BaseAddr = pHid->u64Base;
6197 }
6198 return VINF_SUCCESS;
6199}
6200
6201
6202/**
6203 * Applies the segment limit, base and attributes.
6204 *
6205 * This may raise a \#GP or \#SS.
6206 *
6207 * @returns VBox strict status code.
6208 *
6209 * @param pIemCpu The IEM per CPU data.
6210 * @param fAccess The kind of access which is being performed.
6211 * @param iSegReg The index of the segment register to apply.
6212 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6213 * TSS, ++).
6214 * @param cbMem The access size.
6215 * @param pGCPtrMem Pointer to the guest memory address to apply
6216 * segmentation to. Input and output parameter.
6217 */
6218IEM_STATIC VBOXSTRICTRC
6219iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6220{
6221 if (iSegReg == UINT8_MAX)
6222 return VINF_SUCCESS;
6223
6224 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6225 switch (pIemCpu->enmCpuMode)
6226 {
6227 case IEMMODE_16BIT:
6228 case IEMMODE_32BIT:
6229 {
6230 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6231 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6232
6233 Assert(pSel->Attr.n.u1Present);
6234 Assert(pSel->Attr.n.u1DescType);
6235 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6236 {
6237 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6238 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6239 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6240
6241 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6242 {
6243 /** @todo CPL check. */
6244 }
6245
6246 /*
6247 * There are two kinds of data selectors, normal and expand down.
6248 */
6249 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6250 {
6251 if ( GCPtrFirst32 > pSel->u32Limit
6252 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6253 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6254 }
6255 else
6256 {
6257 /*
6258 * The upper boundary is defined by the B bit, not the G bit!
6259 */
6260 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6261 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6262 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6263 }
6264 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6265 }
6266 else
6267 {
6268
6269 /*
6270                 * A code selector can usually be used to read through; writing is
6271                 * only permitted in real and V8086 mode.
6272 */
6273 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6274 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6275 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6276 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6277 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6278
6279 if ( GCPtrFirst32 > pSel->u32Limit
6280 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6281 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6282
6283 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6284 {
6285 /** @todo CPL check. */
6286 }
6287
6288 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6289 }
6290 return VINF_SUCCESS;
6291 }
6292
6293 case IEMMODE_64BIT:
6294 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6295 *pGCPtrMem += pSel->u64Base;
6296 return VINF_SUCCESS;
6297
6298 default:
6299 AssertFailedReturn(VERR_IEM_IPE_7);
6300 }
6301}
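
/* Worked example for the expand-down case (illustrative): a data segment with
   X86_SEL_TYPE_DOWN, u32Limit=0x0fff and the D/B bit set accepts offsets
   0x1000..0xffffffff; an access touching 0x0fff or below fails the
   'GCPtrFirst32 < u32Limit + 1' test and raises the selector-bounds fault. */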
6302
6303
6304/**
6305 * Translates a virtual address to a physical address and checks if we
6306 * can access the page as specified.
6307 *
6308 * @param pIemCpu The IEM per CPU data.
6309 * @param GCPtrMem The virtual address.
6310 * @param fAccess The intended access.
6311 * @param pGCPhysMem Where to return the physical address.
6312 */
6313IEM_STATIC VBOXSTRICTRC
6314iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6315{
6316 /** @todo Need a different PGM interface here. We're currently using
6317 *        generic / REM interfaces. This won't cut it for R0 & RC. */
6318 RTGCPHYS GCPhys;
6319 uint64_t fFlags;
6320 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6321 if (RT_FAILURE(rc))
6322 {
6323 /** @todo Check unassigned memory in unpaged mode. */
6324 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6325 *pGCPhysMem = NIL_RTGCPHYS;
6326 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6327 }
6328
6329    /* If the page is writable, user-accessible and does not have the no-exec
6330       bit set, all access is allowed. Otherwise we'll have to check more carefully... */
6331 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6332 {
6333 /* Write to read only memory? */
6334 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6335 && !(fFlags & X86_PTE_RW)
6336 && ( pIemCpu->uCpl != 0
6337 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6338 {
6339 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6340 *pGCPhysMem = NIL_RTGCPHYS;
6341 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6342 }
6343
6344 /* Kernel memory accessed by userland? */
6345 if ( !(fFlags & X86_PTE_US)
6346 && pIemCpu->uCpl == 3
6347 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6348 {
6349 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6350 *pGCPhysMem = NIL_RTGCPHYS;
6351 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6352 }
6353
6354 /* Executing non-executable memory? */
6355 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6356 && (fFlags & X86_PTE_PAE_NX)
6357 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6358 {
6359 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6360 *pGCPhysMem = NIL_RTGCPHYS;
6361 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6362 VERR_ACCESS_DENIED);
6363 }
6364 }
6365
6366 /*
6367 * Set the dirty / access flags.
6368     * ASSUMES this is set when the address is translated rather than on commit...
6369 */
6370 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6371 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6372 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6373 {
6374 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6375 AssertRC(rc2);
6376 }
6377
6378 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6379 *pGCPhysMem = GCPhys;
6380 return VINF_SUCCESS;
6381}
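
/* Note on the A/D update above (illustrative): reads request only X86_PTE_A,
   writes request X86_PTE_D | X86_PTE_A; PGMGstModifyPage is only called when
   one of the requested bits is still clear in the PTE flags returned by
   PGMGstGetPage. */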
6382
6383
6384
6385/**
6386 * Maps a physical page.
6387 *
6388 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6389 * @param pIemCpu The IEM per CPU data.
6390 * @param GCPhysMem The physical address.
6391 * @param fAccess The intended access.
6392 * @param ppvMem Where to return the mapping address.
6393 * @param pLock The PGM lock.
6394 */
6395IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6396{
6397#ifdef IEM_VERIFICATION_MODE_FULL
6398 /* Force the alternative path so we can ignore writes. */
6399 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6400 {
6401 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6402 {
6403 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6404 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6405 if (RT_FAILURE(rc2))
6406 pIemCpu->fProblematicMemory = true;
6407 }
6408 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6409 }
6410#endif
6411#ifdef IEM_LOG_MEMORY_WRITES
6412 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6413 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6414#endif
6415#ifdef IEM_VERIFICATION_MODE_MINIMAL
6416 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6417#endif
6418
6419    /** @todo This API may require some improvement later. A private deal with PGM
6420     *        regarding locking and unlocking needs to be struck. A couple of TLBs
6421 * living in PGM, but with publicly accessible inlined access methods
6422 * could perhaps be an even better solution. */
6423 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6424 GCPhysMem,
6425 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6426 pIemCpu->fBypassHandlers,
6427 ppvMem,
6428 pLock);
6429 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6430 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6431
6432#ifdef IEM_VERIFICATION_MODE_FULL
6433 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6434 pIemCpu->fProblematicMemory = true;
6435#endif
6436 return rc;
6437}
6438
6439
6440/**
6441 * Unmaps a page previously mapped by iemMemPageMap.
6442 *
6443 * @param pIemCpu The IEM per CPU data.
6444 * @param GCPhysMem The physical address.
6445 * @param fAccess The intended access.
6446 * @param pvMem What iemMemPageMap returned.
6447 * @param pLock The PGM lock.
6448 */
6449DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6450{
6451 NOREF(pIemCpu);
6452 NOREF(GCPhysMem);
6453 NOREF(fAccess);
6454 NOREF(pvMem);
6455 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6456}
6457
6458
6459/**
6460 * Looks up a memory mapping entry.
6461 *
6462 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6463 * @param pIemCpu The IEM per CPU data.
6464 * @param pvMem The memory address.
6465 * @param fAccess The access type and kind to match (IEM_ACCESS_TYPE_XXX + IEM_ACCESS_WHAT_XXX).
6466 */
6467DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6468{
6469 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
6470 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6471 if ( pIemCpu->aMemMappings[0].pv == pvMem
6472 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6473 return 0;
6474 if ( pIemCpu->aMemMappings[1].pv == pvMem
6475 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6476 return 1;
6477 if ( pIemCpu->aMemMappings[2].pv == pvMem
6478 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6479 return 2;
6480 return VERR_NOT_FOUND;
6481}
6482
6483
6484/**
6485 * Finds a free memmap entry when using iNextMapping doesn't work.
6486 *
6487 * @returns Memory mapping index, 1024 on failure.
6488 * @param pIemCpu The IEM per CPU data.
6489 */
6490IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6491{
6492 /*
6493 * The easy case.
6494 */
6495 if (pIemCpu->cActiveMappings == 0)
6496 {
6497 pIemCpu->iNextMapping = 1;
6498 return 0;
6499 }
6500
6501 /* There should be enough mappings for all instructions. */
6502 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6503
6504 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6505 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6506 return i;
6507
6508 AssertFailedReturn(1024);
6509}
6510
6511
6512/**
6513 * Commits a bounce buffer that needs writing back and unmaps it.
6514 *
6515 * @returns Strict VBox status code.
6516 * @param pIemCpu The IEM per CPU data.
6517 * @param iMemMap The index of the buffer to commit.
6518 */
6519IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6520{
6521 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6522 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6523
6524 /*
6525 * Do the writing.
6526 */
6527#ifndef IEM_VERIFICATION_MODE_MINIMAL
6528 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6529 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6530 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6531 {
6532 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6533 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6534 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6535 if (!pIemCpu->fBypassHandlers)
6536 {
6537 /*
6538 * Carefully and efficiently dealing with access handler return
6539 * codes makes this a little bloated.
6540 */
6541 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6542 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6543 pbBuf,
6544 cbFirst,
6545 PGMACCESSORIGIN_IEM);
6546 if (rcStrict == VINF_SUCCESS)
6547 {
6548 if (cbSecond)
6549 {
6550 rcStrict = PGMPhysWrite(pVM,
6551 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6552 pbBuf + cbFirst,
6553 cbSecond,
6554 PGMACCESSORIGIN_IEM);
6555 if (rcStrict == VINF_SUCCESS)
6556 { /* nothing */ }
6557 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6558 {
6559 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6560 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6561 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6562 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6563 }
6564 else
6565 {
6566 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6567 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6568 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6569 return rcStrict;
6570 }
6571 }
6572 }
6573 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6574 {
6575 if (!cbSecond)
6576 {
6577 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6578 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6579 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6580 }
6581 else
6582 {
6583 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6584 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6585 pbBuf + cbFirst,
6586 cbSecond,
6587 PGMACCESSORIGIN_IEM);
6588 if (rcStrict2 == VINF_SUCCESS)
6589 {
6590 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6591 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6592 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6593 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6594 }
6595 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6596 {
6597 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6598 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6599 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6600 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6601 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6602 }
6603 else
6604 {
6605 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6606 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6607 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6608 return rcStrict2;
6609 }
6610 }
6611 }
6612 else
6613 {
6614 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6615 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6616 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6617 return rcStrict;
6618 }
6619 }
6620 else
6621 {
6622 /*
6623 * No access handlers, much simpler.
6624 */
6625 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6626 if (RT_SUCCESS(rc))
6627 {
6628 if (cbSecond)
6629 {
6630 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6631 if (RT_SUCCESS(rc))
6632 { /* likely */ }
6633 else
6634 {
6635 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6636 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6637 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6638 return rc;
6639 }
6640 }
6641 }
6642 else
6643 {
6644 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6645 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6646 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6647 return rc;
6648 }
6649 }
6650 }
6651#endif
6652
6653#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6654 /*
6655 * Record the write(s).
6656 */
6657 if (!pIemCpu->fNoRem)
6658 {
6659 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6660 if (pEvtRec)
6661 {
6662 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6663 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6664 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6665 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6666 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6667 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6668 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6669 }
6670 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6671 {
6672 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6673 if (pEvtRec)
6674 {
6675 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6676 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6677 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6678 memcpy(pEvtRec->u.RamWrite.ab,
6679 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6680 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6681 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6682 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6683 }
6684 }
6685 }
6686#endif
6687#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6688 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6689 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6690 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6691 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6692 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6693 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6694
6695 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6696 g_cbIemWrote = cbWrote;
6697 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6698#endif
6699
6700 /*
6701 * Free the mapping entry.
6702 */
6703 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6704 Assert(pIemCpu->cActiveMappings != 0);
6705 pIemCpu->cActiveMappings--;
6706 return VINF_SUCCESS;
6707}
6708
6709
6710/**
6711 * iemMemMap worker that deals with a request crossing pages.
6712 */
6713IEM_STATIC VBOXSTRICTRC
6714iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6715{
6716 /*
6717 * Do the address translations.
6718 */
6719 RTGCPHYS GCPhysFirst;
6720 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6721 if (rcStrict != VINF_SUCCESS)
6722 return rcStrict;
6723
6724/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6725 * last byte. */
6726 RTGCPHYS GCPhysSecond;
6727 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6728 if (rcStrict != VINF_SUCCESS)
6729 return rcStrict;
6730 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6731
6732 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6733#ifdef IEM_VERIFICATION_MODE_FULL
6734 /*
6735 * Detect problematic memory when verifying so we can select
6736 * the right execution engine. (TLB: Redo this.)
6737 */
6738 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6739 {
6740 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6741 if (RT_SUCCESS(rc2))
6742 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6743 if (RT_FAILURE(rc2))
6744 pIemCpu->fProblematicMemory = true;
6745 }
6746#endif
6747
6748
6749 /*
6750 * Read in the current memory content if it's a read, execute or partial
6751 * write access.
6752 */
6753 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6754 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6755 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
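    /* Illustrative example: a 4 byte access at a linear address ending in 0xffe gives cbFirstPage=2 and cbSecondPage=2. */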
6756
6757 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6758 {
6759 if (!pIemCpu->fBypassHandlers)
6760 {
6761 /*
6762 * Must carefully deal with access handler status codes here,
6763 * makes the code a bit bloated.
6764 */
6765 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6766 if (rcStrict == VINF_SUCCESS)
6767 {
6768 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6769 if (rcStrict == VINF_SUCCESS)
6770 { /*likely */ }
6771 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6772 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6773 else
6774 {
6775 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6776 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6777 return rcStrict;
6778 }
6779 }
6780 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6781 {
6782 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6783 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6784 {
6785 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6786 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6787 }
6788 else
6789 {
6790 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6791 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6792 return rcStrict2;
6793 }
6794 }
6795 else
6796 {
6797 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6798 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6799 return rcStrict;
6800 }
6801 }
6802 else
6803 {
6804 /*
6805 * No informational status codes here, much more straightforward.
6806 */
6807 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6808 if (RT_SUCCESS(rc))
6809 {
6810 Assert(rc == VINF_SUCCESS);
6811 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6812 if (RT_SUCCESS(rc))
6813 Assert(rc == VINF_SUCCESS);
6814 else
6815 {
6816 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6817 return rc;
6818 }
6819 }
6820 else
6821 {
6822 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6823 return rc;
6824 }
6825 }
6826
6827#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6828 if ( !pIemCpu->fNoRem
6829 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6830 {
6831 /*
6832 * Record the reads.
6833 */
6834 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6835 if (pEvtRec)
6836 {
6837 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6838 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6839 pEvtRec->u.RamRead.cb = cbFirstPage;
6840 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6841 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6842 }
6843 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6844 if (pEvtRec)
6845 {
6846 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6847 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6848 pEvtRec->u.RamRead.cb = cbSecondPage;
6849 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6850 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6851 }
6852 }
6853#endif
6854 }
6855#ifdef VBOX_STRICT
6856 else
6857 memset(pbBuf, 0xcc, cbMem);
6858 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6859 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6860#endif
6861
6862 /*
6863 * Commit the bounce buffer entry.
6864 */
6865 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6866 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6867 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6868 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6869 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6870 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6871 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6872 pIemCpu->iNextMapping = iMemMap + 1;
6873 pIemCpu->cActiveMappings++;
6874
6875 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6876 *ppvMem = pbBuf;
6877 return VINF_SUCCESS;
6878}
6879
6880
6881/**
6882 * iemMemMap worker that deals with iemMemPageMap failures.
6883 */
6884IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6885 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6886{
6887 /*
6888 * Filter out the conditions we cannot handle and the ones which shouldn't happen.
6889 */
6890 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6891 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6892 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6893 {
6894 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6895 return rcMap;
6896 }
6897 pIemCpu->cPotentialExits++;
6898
6899 /*
6900 * Read in the current memory content if it's a read, execute or partial
6901 * write access.
6902 */
6903 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6904 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6905 {
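        /* Unassigned memory reads back as all ones (0xff bytes); the memset below emulates that. */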
6906 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6907 memset(pbBuf, 0xff, cbMem);
6908 else
6909 {
6910 int rc;
6911 if (!pIemCpu->fBypassHandlers)
6912 {
6913 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6914 if (rcStrict == VINF_SUCCESS)
6915 { /* nothing */ }
6916 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6917 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6918 else
6919 {
6920 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6921 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6922 return rcStrict;
6923 }
6924 }
6925 else
6926 {
6927 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6928 if (RT_SUCCESS(rc))
6929 { /* likely */ }
6930 else
6931 {
6932 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6933 GCPhysFirst, rc));
6934 return rc;
6935 }
6936 }
6937 }
6938
6939#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6940 if ( !pIemCpu->fNoRem
6941 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6942 {
6943 /*
6944 * Record the read.
6945 */
6946 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6947 if (pEvtRec)
6948 {
6949 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6950 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6951 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6952 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6953 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6954 }
6955 }
6956#endif
6957 }
6958#ifdef VBOX_STRICT
6959 else
6960 memset(pbBuf, 0xcc, cbMem);
6963 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6964 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6965#endif
6966
6967 /*
6968 * Commit the bounce buffer entry.
6969 */
6970 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6971 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6972 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6973 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6974 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6975 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6976 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6977 pIemCpu->iNextMapping = iMemMap + 1;
6978 pIemCpu->cActiveMappings++;
6979
6980 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6981 *ppvMem = pbBuf;
6982 return VINF_SUCCESS;
6983}
6984
6985
6986
6987/**
6988 * Maps the specified guest memory for the given kind of access.
6989 *
6990 * This may be using bounce buffering of the memory if it's crossing a page
6991 * boundary or if there is an access handler installed for any of it. Because
6992 * of lock prefix guarantees, we're in for some extra clutter when this
6993 * happens.
6994 *
6995 * This may raise a \#GP, \#SS, \#PF or \#AC.
6996 *
6997 * @returns VBox strict status code.
6998 *
6999 * @param pIemCpu The IEM per CPU data.
7000 * @param ppvMem Where to return the pointer to the mapped
7001 * memory.
7002 * @param cbMem The number of bytes to map. This is usually 1,
7003 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7004 * string operations it can be up to a page.
7005 * @param iSegReg The index of the segment register to use for
7006 * this access. The base and limits are checked.
7007 * Use UINT8_MAX to indicate that no segmentation
7008 * is required (for IDT, GDT and LDT accesses).
7009 * @param GCPtrMem The address of the guest memory.
7010 * @param fAccess How the memory is being accessed. The
7011 * IEM_ACCESS_TYPE_XXX bit is used to figure out
7012 * how to map the memory, while the
7013 * IEM_ACCESS_WHAT_XXX bit is used when raising
7014 * exceptions.
7015 */
7016IEM_STATIC VBOXSTRICTRC
7017iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
7018{
7019 /*
7020 * Check the input and figure out which mapping entry to use.
7021 */
7022 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7023 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
7024 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
7025
7026 unsigned iMemMap = pIemCpu->iNextMapping;
7027 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
7028 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7029 {
7030 iMemMap = iemMemMapFindFree(pIemCpu);
7031 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
7032 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
7033 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
7034 pIemCpu->aMemMappings[2].fAccess),
7035 VERR_IEM_IPE_9);
7036 }
7037
7038 /*
7039 * Map the memory, checking that we can actually access it. If something
7040 * slightly complicated happens, fall back on bounce buffering.
7041 */
7042 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7043 if (rcStrict != VINF_SUCCESS)
7044 return rcStrict;
7045
7046 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
7047 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
7048
7049 RTGCPHYS GCPhysFirst;
7050 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
7051 if (rcStrict != VINF_SUCCESS)
7052 return rcStrict;
7053
7054 void *pvMem;
7055 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7056 if (rcStrict != VINF_SUCCESS)
7057 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7058
7059 /*
7060 * Fill in the mapping table entry.
7061 */
7062 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7063 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7064 pIemCpu->iNextMapping = iMemMap + 1;
7065 pIemCpu->cActiveMappings++;
7066
7067 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7068 *ppvMem = pvMem;
7069 return VINF_SUCCESS;
7070}
7071
7072
7073/**
7074 * Commits the guest memory if bounce buffered and unmaps it.
7075 *
7076 * @returns Strict VBox status code.
7077 * @param pIemCpu The IEM per CPU data.
7078 * @param pvMem The mapping.
7079 * @param fAccess The kind of access.
7080 */
7081IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7082{
7083 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7084 AssertReturn(iMemMap >= 0, iMemMap);
7085
7086 /* If it's bounce buffered, we may need to write back the buffer. */
7087 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7088 {
7089 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7090 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7091 }
7092 /* Otherwise unlock it. */
7093 else
7094 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7095
7096 /* Free the entry. */
7097 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7098 Assert(pIemCpu->cActiveMappings != 0);
7099 pIemCpu->cActiveMappings--;
7100 return VINF_SUCCESS;
7101}
7102
7103
7104/**
7105 * Rolls back mappings, releasing page locks and such.
7106 *
7107 * The caller shall only call this after checking cActiveMappings.
7108 *
7110 * @param pIemCpu The IEM per CPU data.
7111 */
7112IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7113{
7114 Assert(pIemCpu->cActiveMappings > 0);
7115
7116 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7117 while (iMemMap-- > 0)
7118 {
7119 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7120 if (fAccess != IEM_ACCESS_INVALID)
7121 {
7122 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7123 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7124 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7125 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7126 Assert(pIemCpu->cActiveMappings > 0);
7127 pIemCpu->cActiveMappings--;
7128 }
7129 }
7130}
7131
7132
7133/**
7134 * Fetches a data byte.
7135 *
7136 * @returns Strict VBox status code.
7137 * @param pIemCpu The IEM per CPU data.
7138 * @param pu8Dst Where to return the byte.
7139 * @param iSegReg The index of the segment register to use for
7140 * this access. The base and limits are checked.
7141 * @param GCPtrMem The address of the guest memory.
7142 */
7143IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7144{
7145 /* The lazy approach for now... */
7146 uint8_t const *pu8Src;
7147 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7148 if (rc == VINF_SUCCESS)
7149 {
7150 *pu8Dst = *pu8Src;
7151 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7152 }
7153 return rc;
7154}
7155
7156
7157/**
7158 * Fetches a data word.
7159 *
7160 * @returns Strict VBox status code.
7161 * @param pIemCpu The IEM per CPU data.
7162 * @param pu16Dst Where to return the word.
7163 * @param iSegReg The index of the segment register to use for
7164 * this access. The base and limits are checked.
7165 * @param GCPtrMem The address of the guest memory.
7166 */
7167IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7168{
7169 /* The lazy approach for now... */
7170 uint16_t const *pu16Src;
7171 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7172 if (rc == VINF_SUCCESS)
7173 {
7174 *pu16Dst = *pu16Src;
7175 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7176 }
7177 return rc;
7178}
7179
7180
7181/**
7182 * Fetches a data dword.
7183 *
7184 * @returns Strict VBox status code.
7185 * @param pIemCpu The IEM per CPU data.
7186 * @param pu32Dst Where to return the dword.
7187 * @param iSegReg The index of the segment register to use for
7188 * this access. The base and limits are checked.
7189 * @param GCPtrMem The address of the guest memory.
7190 */
7191IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7192{
7193 /* The lazy approach for now... */
7194 uint32_t const *pu32Src;
7195 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7196 if (rc == VINF_SUCCESS)
7197 {
7198 *pu32Dst = *pu32Src;
7199 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7200 }
7201 return rc;
7202}
7203
7204
7205#ifdef SOME_UNUSED_FUNCTION
7206/**
7207 * Fetches a data dword and sign extends it to a qword.
7208 *
7209 * @returns Strict VBox status code.
7210 * @param pIemCpu The IEM per CPU data.
7211 * @param pu64Dst Where to return the sign extended value.
7212 * @param iSegReg The index of the segment register to use for
7213 * this access. The base and limits are checked.
7214 * @param GCPtrMem The address of the guest memory.
7215 */
7216IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7217{
7218 /* The lazy approach for now... */
7219 int32_t const *pi32Src;
7220 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7221 if (rc == VINF_SUCCESS)
7222 {
7223 *pu64Dst = *pi32Src;
7224 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7225 }
7226#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7227 else
7228 *pu64Dst = 0;
7229#endif
7230 return rc;
7231}
7232#endif
7233
7234
7235/**
7236 * Fetches a data qword.
7237 *
7238 * @returns Strict VBox status code.
7239 * @param pIemCpu The IEM per CPU data.
7240 * @param pu64Dst Where to return the qword.
7241 * @param iSegReg The index of the segment register to use for
7242 * this access. The base and limits are checked.
7243 * @param GCPtrMem The address of the guest memory.
7244 */
7245IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7246{
7247 /* The lazy approach for now... */
7248 uint64_t const *pu64Src;
7249 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7250 if (rc == VINF_SUCCESS)
7251 {
7252 *pu64Dst = *pu64Src;
7253 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7254 }
7255 return rc;
7256}
7257
7258
7259/**
7260 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7261 *
7262 * @returns Strict VBox status code.
7263 * @param pIemCpu The IEM per CPU data.
7264 * @param pu64Dst Where to return the qword.
7265 * @param iSegReg The index of the segment register to use for
7266 * this access. The base and limits are checked.
7267 * @param GCPtrMem The address of the guest memory.
7268 */
7269IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7270{
7271 /* The lazy approach for now... */
7272 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7273 if (RT_UNLIKELY(GCPtrMem & 15))
7274 return iemRaiseGeneralProtectionFault0(pIemCpu);
7275
7276 uint64_t const *pu64Src;
7277 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7278 if (rc == VINF_SUCCESS)
7279 {
7280 *pu64Dst = *pu64Src;
7281 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7282 }
7283 return rc;
7284}
7285
7286
7287/**
7288 * Fetches a data tword.
7289 *
7290 * @returns Strict VBox status code.
7291 * @param pIemCpu The IEM per CPU data.
7292 * @param pr80Dst Where to return the tword.
7293 * @param iSegReg The index of the segment register to use for
7294 * this access. The base and limits are checked.
7295 * @param GCPtrMem The address of the guest memory.
7296 */
7297IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7298{
7299 /* The lazy approach for now... */
7300 PCRTFLOAT80U pr80Src;
7301 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7302 if (rc == VINF_SUCCESS)
7303 {
7304 *pr80Dst = *pr80Src;
7305 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7306 }
7307 return rc;
7308}
7309
7310
7311/**
7312 * Fetches a data dqword (double qword), generally SSE related.
7313 *
7314 * @returns Strict VBox status code.
7315 * @param pIemCpu The IEM per CPU data.
7316 * @param pu128Dst Where to return the dqword.
7317 * @param iSegReg The index of the segment register to use for
7318 * this access. The base and limits are checked.
7319 * @param GCPtrMem The address of the guest memory.
7320 */
7321IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7322{
7323 /* The lazy approach for now... */
7324 uint128_t const *pu128Src;
7325 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7326 if (rc == VINF_SUCCESS)
7327 {
7328 *pu128Dst = *pu128Src;
7329 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7330 }
7331 return rc;
7332}
7333
7334
7335/**
7336 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7337 * related.
7338 *
7339 * Raises \#GP(0) if not aligned.
7340 *
7341 * @returns Strict VBox status code.
7342 * @param pIemCpu The IEM per CPU data.
7343 * @param pu128Dst Where to return the dqword.
7344 * @param iSegReg The index of the segment register to use for
7345 * this access. The base and limits are checked.
7346 * @param GCPtrMem The address of the guest memory.
7347 */
7348IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7349{
7350 /* The lazy approach for now... */
7351 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7352 if ( (GCPtrMem & 15)
7353 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7354 return iemRaiseGeneralProtectionFault0(pIemCpu);
7355
7356 uint128_t const *pu128Src;
7357 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7358 if (rc == VINF_SUCCESS)
7359 {
7360 *pu128Dst = *pu128Src;
7361 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7362 }
7363 return rc;
7364}
7365
7366
7367
7368
7369/**
7370 * Fetches a descriptor register (lgdt, lidt).
7371 *
7372 * @returns Strict VBox status code.
7373 * @param pIemCpu The IEM per CPU data.
7374 * @param pcbLimit Where to return the limit.
7375 * @param pGCPtrBase Where to return the base.
7376 * @param iSegReg The index of the segment register to use for
7377 * this access. The base and limits are checked.
7378 * @param GCPtrMem The address of the guest memory.
7379 * @param enmOpSize The effective operand size.
7380 */
7381IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7382 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7383{
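    /*
     * The lgdt/lidt memory operand is a 16-bit limit followed by the base
     * address.  Only the bytes actually consumed are mapped: 2+3 bytes for
     * 16-bit operand size (24-bit base), 2+4 for 32-bit and 2+8 for 64-bit.
     */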
7384 uint8_t const *pu8Src;
7385 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7386 (void **)&pu8Src,
7387 enmOpSize == IEMMODE_64BIT
7388 ? 2 + 8
7389 : enmOpSize == IEMMODE_32BIT
7390 ? 2 + 4
7391 : 2 + 3,
7392 iSegReg,
7393 GCPtrMem,
7394 IEM_ACCESS_DATA_R);
7395 if (rcStrict == VINF_SUCCESS)
7396 {
7397 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7398 switch (enmOpSize)
7399 {
7400 case IEMMODE_16BIT:
7401 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7402 break;
7403 case IEMMODE_32BIT:
7404 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7405 break;
7406 case IEMMODE_64BIT:
7407 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7408 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7409 break;
7410
7411 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7412 }
7413 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7414 }
7415 return rcStrict;
7416}
7417
7418
7419
7420/**
7421 * Stores a data byte.
7422 *
7423 * @returns Strict VBox status code.
7424 * @param pIemCpu The IEM per CPU data.
7425 * @param iSegReg The index of the segment register to use for
7426 * this access. The base and limits are checked.
7427 * @param GCPtrMem The address of the guest memory.
7428 * @param u8Value The value to store.
7429 */
7430IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7431{
7432 /* The lazy approach for now... */
7433 uint8_t *pu8Dst;
7434 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7435 if (rc == VINF_SUCCESS)
7436 {
7437 *pu8Dst = u8Value;
7438 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7439 }
7440 return rc;
7441}
7442
7443
7444/**
7445 * Stores a data word.
7446 *
7447 * @returns Strict VBox status code.
7448 * @param pIemCpu The IEM per CPU data.
7449 * @param iSegReg The index of the segment register to use for
7450 * this access. The base and limits are checked.
7451 * @param GCPtrMem The address of the guest memory.
7452 * @param u16Value The value to store.
7453 */
7454IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7455{
7456 /* The lazy approach for now... */
7457 uint16_t *pu16Dst;
7458 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7459 if (rc == VINF_SUCCESS)
7460 {
7461 *pu16Dst = u16Value;
7462 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7463 }
7464 return rc;
7465}
7466
7467
7468/**
7469 * Stores a data dword.
7470 *
7471 * @returns Strict VBox status code.
7472 * @param pIemCpu The IEM per CPU data.
7473 * @param iSegReg The index of the segment register to use for
7474 * this access. The base and limits are checked.
7475 * @param GCPtrMem The address of the guest memory.
7476 * @param u32Value The value to store.
7477 */
7478IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7479{
7480 /* The lazy approach for now... */
7481 uint32_t *pu32Dst;
7482 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7483 if (rc == VINF_SUCCESS)
7484 {
7485 *pu32Dst = u32Value;
7486 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7487 }
7488 return rc;
7489}
7490
7491
7492/**
7493 * Stores a data qword.
7494 *
7495 * @returns Strict VBox status code.
7496 * @param pIemCpu The IEM per CPU data.
7497 * @param iSegReg The index of the segment register to use for
7498 * this access. The base and limits are checked.
7499 * @param GCPtrMem The address of the guest memory.
7500 * @param u64Value The value to store.
7501 */
7502IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7503{
7504 /* The lazy approach for now... */
7505 uint64_t *pu64Dst;
7506 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7507 if (rc == VINF_SUCCESS)
7508 {
7509 *pu64Dst = u64Value;
7510 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7511 }
7512 return rc;
7513}
7514
7515
7516/**
7517 * Stores a data dqword.
7518 *
7519 * @returns Strict VBox status code.
7520 * @param pIemCpu The IEM per CPU data.
7521 * @param iSegReg The index of the segment register to use for
7522 * this access. The base and limits are checked.
7523 * @param GCPtrMem The address of the guest memory.
7524 * @param u128Value The value to store.
7525 */
7526IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7527{
7528 /* The lazy approach for now... */
7529 uint128_t *pu128Dst;
7530 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7531 if (rc == VINF_SUCCESS)
7532 {
7533 *pu128Dst = u128Value;
7534 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7535 }
7536 return rc;
7537}
7538
7539
7540/**
7541 * Stores a data dqword, SSE aligned.
7542 *
7543 * @returns Strict VBox status code.
7544 * @param pIemCpu The IEM per CPU data.
7545 * @param iSegReg The index of the segment register to use for
7546 * this access. The base and limits are checked.
7547 * @param GCPtrMem The address of the guest memory.
7548 * @param u128Value The value to store.
7549 */
7550IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7551{
7552 /* The lazy approach for now... */
7553 if ( (GCPtrMem & 15)
7554 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7555 return iemRaiseGeneralProtectionFault0(pIemCpu);
7556
7557 uint128_t *pu128Dst;
7558 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7559 if (rc == VINF_SUCCESS)
7560 {
7561 *pu128Dst = u128Value;
7562 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7563 }
7564 return rc;
7565}
7566
7567
7568/**
7569 * Stores a descriptor register (sgdt, sidt).
7570 *
7571 * @returns Strict VBox status code.
7572 * @param pIemCpu The IEM per CPU data.
7573 * @param cbLimit The limit.
7574 * @param GCPtrBase The base address.
7575 * @param iSegReg The index of the segment register to use for
7576 * this access. The base and limits are checked.
7577 * @param GCPtrMem The address of the guest memory.
7578 * @param enmOpSize The effective operand size.
7579 */
7580IEM_STATIC VBOXSTRICTRC
7581iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7582{
7583 uint8_t *pu8Src;
7584 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7585 (void **)&pu8Src,
7586 enmOpSize == IEMMODE_64BIT
7587 ? 2 + 8
7588 : enmOpSize == IEMMODE_32BIT
7589 ? 2 + 4
7590 : 2 + 3,
7591 iSegReg,
7592 GCPtrMem,
7593 IEM_ACCESS_DATA_W);
7594 if (rcStrict == VINF_SUCCESS)
7595 {
7596 pu8Src[0] = RT_BYTE1(cbLimit);
7597 pu8Src[1] = RT_BYTE2(cbLimit);
7598 pu8Src[2] = RT_BYTE1(GCPtrBase);
7599 pu8Src[3] = RT_BYTE2(GCPtrBase);
7600 pu8Src[4] = RT_BYTE3(GCPtrBase);
7601 if (enmOpSize == IEMMODE_16BIT)
7602 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7603 else
7604 {
7605 pu8Src[5] = RT_BYTE4(GCPtrBase);
7606 if (enmOpSize == IEMMODE_64BIT)
7607 {
7608 pu8Src[6] = RT_BYTE5(GCPtrBase);
7609 pu8Src[7] = RT_BYTE6(GCPtrBase);
7610 pu8Src[8] = RT_BYTE7(GCPtrBase);
7611 pu8Src[9] = RT_BYTE8(GCPtrBase);
7612 }
7613 }
7614 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7615 }
7616 return rcStrict;
7617}
7618
7619
7620/**
7621 * Pushes a word onto the stack.
7622 *
7623 * @returns Strict VBox status code.
7624 * @param pIemCpu The IEM per CPU data.
7625 * @param u16Value The value to push.
7626 */
7627IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7628{
7629 /* Decrement the stack pointer. */
7630 uint64_t uNewRsp;
7631 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7632 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7633
7634 /* Write the word the lazy way. */
7635 uint16_t *pu16Dst;
7636 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7637 if (rc == VINF_SUCCESS)
7638 {
7639 *pu16Dst = u16Value;
7640 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7641 }
7642
7643 /* Commit the new RSP value unless an access handler made trouble. */
7644 if (rc == VINF_SUCCESS)
7645 pCtx->rsp = uNewRsp;
7646
7647 return rc;
7648}
7649
7650
7651/**
7652 * Pushes a dword onto the stack.
7653 *
7654 * @returns Strict VBox status code.
7655 * @param pIemCpu The IEM per CPU data.
7656 * @param u32Value The value to push.
7657 */
7658IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7659{
7660 /* Decrement the stack pointer. */
7661 uint64_t uNewRsp;
7662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7663 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7664
7665 /* Write the dword the lazy way. */
7666 uint32_t *pu32Dst;
7667 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7668 if (rc == VINF_SUCCESS)
7669 {
7670 *pu32Dst = u32Value;
7671 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7672 }
7673
7674 /* Commit the new RSP value unless an access handler made trouble. */
7675 if (rc == VINF_SUCCESS)
7676 pCtx->rsp = uNewRsp;
7677
7678 return rc;
7679}
7680
7681
7682/**
7683 * Pushes a dword segment register value onto the stack.
7684 *
7685 * @returns Strict VBox status code.
7686 * @param pIemCpu The IEM per CPU data.
7687 * @param u32Value The value to push.
7688 */
7689IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7690{
7691 /* Decrement the stack pointer. */
7692 uint64_t uNewRsp;
7693 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7694 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7695
7696 VBOXSTRICTRC rc;
7697 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7698 {
7699 /* The recompiler writes a full dword. */
7700 uint32_t *pu32Dst;
7701 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7702 if (rc == VINF_SUCCESS)
7703 {
7704 *pu32Dst = u32Value;
7705 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7706 }
7707 }
7708 else
7709 {
7710 /* The intel docs talk about zero extending the selector register
7711 value. My actual intel CPU here might be zero extending the value
7712 but it still only writes the lower word... */
7713 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7714 * happens when crossing a page boundary, is the high word checked
7715 * for write accessibility or not? Probably it is. What about segment limits?
7716 * It appears this behavior is also shared with trap error codes.
7717 *
7718 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
7719 * ancient hardware when it actually did change. */
7720 uint16_t *pu16Dst;
7721 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7722 if (rc == VINF_SUCCESS)
7723 {
7724 *pu16Dst = (uint16_t)u32Value;
7725 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7726 }
7727 }
7728
7729 /* Commit the new RSP value unless an access handler made trouble. */
7730 if (rc == VINF_SUCCESS)
7731 pCtx->rsp = uNewRsp;
7732
7733 return rc;
7734}
7735
7736
7737/**
7738 * Pushes a qword onto the stack.
7739 *
7740 * @returns Strict VBox status code.
7741 * @param pIemCpu The IEM per CPU data.
7742 * @param u64Value The value to push.
7743 */
7744IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7745{
7746 /* Decrement the stack pointer. */
7747 uint64_t uNewRsp;
7748 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7749 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7750
7751 /* Write the qword the lazy way. */
7752 uint64_t *pu64Dst;
7753 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7754 if (rc == VINF_SUCCESS)
7755 {
7756 *pu64Dst = u64Value;
7757 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7758 }
7759
7760 /* Commit the new RSP value unless an access handler made trouble. */
7761 if (rc == VINF_SUCCESS)
7762 pCtx->rsp = uNewRsp;
7763
7764 return rc;
7765}
7766
7767
7768/**
7769 * Pops a word from the stack.
7770 *
7771 * @returns Strict VBox status code.
7772 * @param pIemCpu The IEM per CPU data.
7773 * @param pu16Value Where to store the popped value.
7774 */
7775IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7776{
7777 /* Increment the stack pointer. */
7778 uint64_t uNewRsp;
7779 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7780 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7781
7782 /* Read the word the lazy way. */
7783 uint16_t const *pu16Src;
7784 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7785 if (rc == VINF_SUCCESS)
7786 {
7787 *pu16Value = *pu16Src;
7788 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7789
7790 /* Commit the new RSP value. */
7791 if (rc == VINF_SUCCESS)
7792 pCtx->rsp = uNewRsp;
7793 }
7794
7795 return rc;
7796}
7797
7798
7799/**
7800 * Pops a dword from the stack.
7801 *
7802 * @returns Strict VBox status code.
7803 * @param pIemCpu The IEM per CPU data.
7804 * @param pu32Value Where to store the popped value.
7805 */
7806IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7807{
7808 /* Increment the stack pointer. */
7809 uint64_t uNewRsp;
7810 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7811 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7812
7813 /* Read the dword the lazy way. */
7814 uint32_t const *pu32Src;
7815 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7816 if (rc == VINF_SUCCESS)
7817 {
7818 *pu32Value = *pu32Src;
7819 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7820
7821 /* Commit the new RSP value. */
7822 if (rc == VINF_SUCCESS)
7823 pCtx->rsp = uNewRsp;
7824 }
7825
7826 return rc;
7827}
7828
7829
7830/**
7831 * Pops a qword from the stack.
7832 *
7833 * @returns Strict VBox status code.
7834 * @param pIemCpu The IEM per CPU data.
7835 * @param pu64Value Where to store the popped value.
7836 */
7837IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7838{
7839 /* Increment the stack pointer. */
7840 uint64_t uNewRsp;
7841 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7842 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7843
7844 /* Read the qword the lazy way. */
7845 uint64_t const *pu64Src;
7846 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7847 if (rc == VINF_SUCCESS)
7848 {
7849 *pu64Value = *pu64Src;
7850 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7851
7852 /* Commit the new RSP value. */
7853 if (rc == VINF_SUCCESS)
7854 pCtx->rsp = uNewRsp;
7855 }
7856
7857 return rc;
7858}
7859
7860
7861/**
7862 * Pushes a word onto the stack, using a temporary stack pointer.
7863 *
7864 * @returns Strict VBox status code.
7865 * @param pIemCpu The IEM per CPU data.
7866 * @param u16Value The value to push.
7867 * @param pTmpRsp Pointer to the temporary stack pointer.
7868 */
7869IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7870{
7871 /* Decrement the stack pointer. */
7872 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7873 RTUINT64U NewRsp = *pTmpRsp;
7874 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7875
7876 /* Write the word the lazy way. */
7877 uint16_t *pu16Dst;
7878 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7879 if (rc == VINF_SUCCESS)
7880 {
7881 *pu16Dst = u16Value;
7882 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7883 }
7884
7885 /* Commit the new RSP value unless an access handler made trouble. */
7886 if (rc == VINF_SUCCESS)
7887 *pTmpRsp = NewRsp;
7888
7889 return rc;
7890}
7891
7892
7893/**
7894 * Pushes a dword onto the stack, using a temporary stack pointer.
7895 *
7896 * @returns Strict VBox status code.
7897 * @param pIemCpu The IEM per CPU data.
7898 * @param u32Value The value to push.
7899 * @param pTmpRsp Pointer to the temporary stack pointer.
7900 */
7901IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7902{
7903 /* Decrement the stack pointer. */
7904 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7905 RTUINT64U NewRsp = *pTmpRsp;
7906 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7907
7908 /* Write the dword the lazy way. */
7909 uint32_t *pu32Dst;
7910 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7911 if (rc == VINF_SUCCESS)
7912 {
7913 *pu32Dst = u32Value;
7914 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7915 }
7916
7917 /* Commit the new RSP value unless an access handler made trouble. */
7918 if (rc == VINF_SUCCESS)
7919 *pTmpRsp = NewRsp;
7920
7921 return rc;
7922}
7923
7924
7925/**
7926 * Pushes a qword onto the stack, using a temporary stack pointer.
7927 *
7928 * @returns Strict VBox status code.
7929 * @param pIemCpu The IEM per CPU data.
7930 * @param u64Value The value to push.
7931 * @param pTmpRsp Pointer to the temporary stack pointer.
7932 */
7933IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7934{
7935 /* Decrement the stack pointer. */
7936 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7937 RTUINT64U NewRsp = *pTmpRsp;
7938 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7939
7940 /* Write the qword the lazy way. */
7941 uint64_t *pu64Dst;
7942 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7943 if (rc == VINF_SUCCESS)
7944 {
7945 *pu64Dst = u64Value;
7946 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7947 }
7948
7949 /* Commit the new RSP value unless an access handler made trouble. */
7950 if (rc == VINF_SUCCESS)
7951 *pTmpRsp = NewRsp;
7952
7953 return rc;
7954}
7955
7956
7957/**
7958 * Pops a word from the stack, using a temporary stack pointer.
7959 *
7960 * @returns Strict VBox status code.
7961 * @param pIemCpu The IEM per CPU data.
7962 * @param pu16Value Where to store the popped value.
7963 * @param pTmpRsp Pointer to the temporary stack pointer.
7964 */
7965IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7966{
7967 /* Increment the stack pointer. */
7968 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7969 RTUINT64U NewRsp = *pTmpRsp;
7970 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7971
7972 /* Read the word the lazy way. */
7973 uint16_t const *pu16Src;
7974 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7975 if (rc == VINF_SUCCESS)
7976 {
7977 *pu16Value = *pu16Src;
7978 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7979
7980 /* Commit the new RSP value. */
7981 if (rc == VINF_SUCCESS)
7982 *pTmpRsp = NewRsp;
7983 }
7984
7985 return rc;
7986}
7987
7988
7989/**
7990 * Pops a dword from the stack, using a temporary stack pointer.
7991 *
7992 * @returns Strict VBox status code.
7993 * @param pIemCpu The IEM per CPU data.
7994 * @param pu32Value Where to store the popped value.
7995 * @param pTmpRsp Pointer to the temporary stack pointer.
7996 */
7997IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7998{
7999 /* Increment the stack pointer. */
8000 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8001 RTUINT64U NewRsp = *pTmpRsp;
8002 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
8003
8004 /* Read the dword the lazy way. */
8005 uint32_t const *pu32Src;
8006 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8007 if (rc == VINF_SUCCESS)
8008 {
8009 *pu32Value = *pu32Src;
8010 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8011
8012 /* Commit the new RSP value. */
8013 if (rc == VINF_SUCCESS)
8014 *pTmpRsp = NewRsp;
8015 }
8016
8017 return rc;
8018}
8019
8020
8021/**
8022 * Pops a qword from the stack, using a temporary stack pointer.
8023 *
8024 * @returns Strict VBox status code.
8025 * @param pIemCpu The IEM per CPU data.
8026 * @param pu64Value Where to store the popped value.
8027 * @param pTmpRsp Pointer to the temporary stack pointer.
8028 */
8029IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
8030{
8031 /* Increment the stack pointer. */
8032 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8033 RTUINT64U NewRsp = *pTmpRsp;
8034 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8035
8036 /* Read the qword the lazy way. */
8037 uint64_t const *pu64Src;
8038 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8039 if (rcStrict == VINF_SUCCESS)
8040 {
8041 *pu64Value = *pu64Src;
8042 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8043
8044 /* Commit the new RSP value. */
8045 if (rcStrict == VINF_SUCCESS)
8046 *pTmpRsp = NewRsp;
8047 }
8048
8049 return rcStrict;
8050}
8051
8052
8053/**
8054 * Begin a special stack push (used by interrupts, exceptions and such).
8055 *
8056 * This will raise \#SS or \#PF if appropriate.
8057 *
8058 * @returns Strict VBox status code.
8059 * @param pIemCpu The IEM per CPU data.
8060 * @param cbMem The number of bytes to push onto the stack.
8061 * @param ppvMem Where to return the pointer to the stack memory.
8062 * As with the other memory functions this could be
8063 * direct access or bounce buffered access, so
8064 * don't commit register until the commit call
8065 * don't commit the register update until the commit call
8066 * @param puNewRsp Where to return the new RSP value. This must be
8067 * passed unchanged to
8068 * iemMemStackPushCommitSpecial().
8069 */
8070IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8071{
8072 Assert(cbMem < UINT8_MAX);
8073 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8074 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8075 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8076}
8077
8078
8079/**
8080 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8081 *
8082 * This will update the rSP.
8083 *
8084 * @returns Strict VBox status code.
8085 * @param pIemCpu The IEM per CPU data.
8086 * @param pvMem The pointer returned by
8087 * iemMemStackPushBeginSpecial().
8088 * @param uNewRsp The new RSP value returned by
8089 * iemMemStackPushBeginSpecial().
8090 */
8091IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8092{
8093 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8094 if (rcStrict == VINF_SUCCESS)
8095 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8096 return rcStrict;
8097}
8098
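/*
 * Illustrative sketch only (not lifted from the actual exception code): how the
 * special push helpers above are meant to be paired.  The three-dword frame and
 * the uOldEip/uOldCs/uOldEFlags locals are assumptions made up for this example.
 *
 *     uint64_t  uNewRsp;
 *     uint32_t *pu32Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 3 * sizeof(uint32_t),
 *                                                         (void **)&pu32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pu32Frame[2] = uOldEFlags;
 *     pu32Frame[1] = uOldCs;
 *     pu32Frame[0] = uOldEip;
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu32Frame, uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // Only now is it safe to load the new CS:RIP; RSP has been committed above.
 */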
8099
8100/**
8101 * Begin a special stack pop (used by iret, retf and such).
8102 *
8103 * This will raise \#SS or \#PF if appropriate.
8104 *
8105 * @returns Strict VBox status code.
8106 * @param pIemCpu The IEM per CPU data.
8107 * @param pIemCpu The IEM per CPU data.
8108 * @param cbMem The number of bytes to pop off the stack.
8109 * @param ppvMem Where to return the pointer to the stack memory.
8109 * @param puNewRsp Where to return the new RSP value. This must be
8110 * passed unchanged to
8111 * iemMemStackPopCommitSpecial() or applied
8112 * manually if iemMemStackPopDoneSpecial() is used.
8113 */
8114IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8115{
8116 Assert(cbMem < UINT8_MAX);
8117 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8118 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8119 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8120}
8121
8122
8123/**
8124 * Continue a special stack pop (used by iret and retf).
8125 *
8126 * This will raise \#SS or \#PF if appropriate.
8127 *
8128 * @returns Strict VBox status code.
8129 * @param pIemCpu The IEM per CPU data.
8130 * @param cbMem The number of bytes to pop off the stack.
8131 * @param ppvMem Where to return the pointer to the stack memory.
8132 * @param puNewRsp Where to return the new RSP value. This must be
8133 * passed unchanged to
8134 * iemMemStackPopCommitSpecial() or applied
8135 * manually if iemMemStackPopDoneSpecial() is used.
8136 */
8137IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8138{
8139 Assert(cbMem < UINT8_MAX);
8140 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8141 RTUINT64U NewRsp;
8142 NewRsp.u = *puNewRsp;
8143 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8144 *puNewRsp = NewRsp.u;
8145 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8146}
8147
8148
8149/**
8150 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8151 *
8152 * This will update the rSP.
8153 *
8154 * @returns Strict VBox status code.
8155 * @param pIemCpu The IEM per CPU data.
8156 * @param pvMem The pointer returned by
8157 * iemMemStackPopBeginSpecial().
8158 * @param uNewRsp The new RSP value returned by
8159 * iemMemStackPopBeginSpecial().
8160 */
8161IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8162{
8163 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8164 if (rcStrict == VINF_SUCCESS)
8165 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8166 return rcStrict;
8167}
8168
8169
8170/**
8171 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8172 * iemMemStackPopContinueSpecial).
8173 *
8174 * The caller will manually commit the rSP.
8175 *
8176 * @returns Strict VBox status code.
8177 * @param pIemCpu The IEM per CPU data.
8178 * @param pvMem The pointer returned by
8179 * iemMemStackPopBeginSpecial() or
8180 * iemMemStackPopContinueSpecial().
8181 */
8182IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8183{
8184 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8185}
8186
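/*
 * Illustrative sketch only (assumptions, not lifted from IEMAllCImpl.cpp.h): how
 * a retf/iret style path is expected to chain the special pop helpers above.
 * The two-word real-mode frame is made up for this example.
 *
 *     uint64_t        uNewRsp;
 *     uint16_t const *pu16Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 2 * sizeof(uint16_t),
 *                                                        (void const **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint16_t const uNewIp = pu16Frame[0];
 *     uint16_t const uNewCs = pu16Frame[1];
 *     rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu16Frame);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // Validate and load uNewCs/uNewIp, then commit RSP manually from uNewRsp
 *     // (or use iemMemStackPopCommitSpecial when no further pops are needed).
 */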
8187
8188/**
8189 * Fetches a system table byte.
8190 *
8191 * @returns Strict VBox status code.
8192 * @param pIemCpu The IEM per CPU data.
8193 * @param pbDst Where to return the byte.
8194 * @param iSegReg The index of the segment register to use for
8195 * this access. The base and limits are checked.
8196 * @param GCPtrMem The address of the guest memory.
8197 */
8198IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8199{
8200 /* The lazy approach for now... */
8201 uint8_t const *pbSrc;
8202 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8203 if (rc == VINF_SUCCESS)
8204 {
8205 *pbDst = *pbSrc;
8206 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8207 }
8208 return rc;
8209}
8210
8211
8212/**
8213 * Fetches a system table word.
8214 *
8215 * @returns Strict VBox status code.
8216 * @param pIemCpu The IEM per CPU data.
8217 * @param pu16Dst Where to return the word.
8218 * @param iSegReg The index of the segment register to use for
8219 * this access. The base and limits are checked.
8220 * @param GCPtrMem The address of the guest memory.
8221 */
8222IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8223{
8224 /* The lazy approach for now... */
8225 uint16_t const *pu16Src;
8226 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8227 if (rc == VINF_SUCCESS)
8228 {
8229 *pu16Dst = *pu16Src;
8230 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8231 }
8232 return rc;
8233}
8234
8235
8236/**
8237 * Fetches a system table dword.
8238 *
8239 * @returns Strict VBox status code.
8240 * @param pIemCpu The IEM per CPU data.
8241 * @param pu32Dst Where to return the dword.
8242 * @param iSegReg The index of the segment register to use for
8243 * this access. The base and limits are checked.
8244 * @param GCPtrMem The address of the guest memory.
8245 */
8246IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8247{
8248 /* The lazy approach for now... */
8249 uint32_t const *pu32Src;
8250 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8251 if (rc == VINF_SUCCESS)
8252 {
8253 *pu32Dst = *pu32Src;
8254 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8255 }
8256 return rc;
8257}
8258
8259
8260/**
8261 * Fetches a system table qword.
8262 *
8263 * @returns Strict VBox status code.
8264 * @param pIemCpu The IEM per CPU data.
8265 * @param pu64Dst Where to return the qword.
8266 * @param iSegReg The index of the segment register to use for
8267 * this access. The base and limits are checked.
8268 * @param GCPtrMem The address of the guest memory.
8269 */
8270IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8271{
8272 /* The lazy approach for now... */
8273 uint64_t const *pu64Src;
8274 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8275 if (rc == VINF_SUCCESS)
8276 {
8277 *pu64Dst = *pu64Src;
8278 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8279 }
8280 return rc;
8281}
8282
8283
8284/**
8285 * Fetches a descriptor table entry with caller specified error code.
8286 *
8287 * @returns Strict VBox status code.
8288 * @param pIemCpu The IEM per CPU.
8289 * @param pDesc Where to return the descriptor table entry.
8290 * @param uSel The selector which table entry to fetch.
8291 * @param uXcpt The exception to raise on table lookup error.
8292 * @param uErrorCode The error code associated with the exception.
8293 */
8294IEM_STATIC VBOXSTRICTRC
8295iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8296{
8297 AssertPtr(pDesc);
8298 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8299
8300 /** @todo did the 286 require all 8 bytes to be accessible? */
8301 /*
8302 * Get the selector table base and check bounds.
8303 */
8304 RTGCPTR GCPtrBase;
8305 if (uSel & X86_SEL_LDT)
8306 {
8307 if ( !pCtx->ldtr.Attr.n.u1Present
8308 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8309 {
8310 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8311 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8312 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8313 uErrorCode, 0);
8314 }
8315
8316 Assert(pCtx->ldtr.Attr.n.u1Present);
8317 GCPtrBase = pCtx->ldtr.u64Base;
8318 }
8319 else
8320 {
8321 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8322 {
8323 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8324 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8325 uErrorCode, 0);
8326 }
8327 GCPtrBase = pCtx->gdtr.pGdt;
8328 }
8329
8330 /*
8331 * Read the legacy descriptor and maybe the long mode extensions if
8332 * required.
8333 */
8334 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8335 if (rcStrict == VINF_SUCCESS)
8336 {
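        /* In long mode, system (u1DescType=0) descriptors are 16 bytes wide, so
           fetch the upper half too if it is within the table limit; otherwise
           the upper half is simply zeroed. */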
8337 if ( !IEM_IS_LONG_MODE(pIemCpu)
8338 || pDesc->Legacy.Gen.u1DescType)
8339 pDesc->Long.au64[1] = 0;
8340 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8341 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8342 else
8343 {
8344 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8345 /** @todo is this the right exception? */
8346 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8347 }
8348 }
8349 return rcStrict;
8350}
8351
8352
8353/**
8354 * Fetches a descriptor table entry.
8355 *
8356 * @returns Strict VBox status code.
8357 * @param pIemCpu The IEM per CPU.
8358 * @param pDesc Where to return the descriptor table entry.
8359 * @param uSel The selector which table entry to fetch.
8360 * @param uXcpt The exception to raise on table lookup error.
8361 */
8362IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8363{
8364 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8365}
8366
8367
8368/**
8369 * Fakes a long mode stack selector for SS = 0.
8370 *
8371 * @param pDescSs Where to return the fake stack descriptor.
8372 * @param uDpl The DPL we want.
8373 */
8374IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8375{
8376 pDescSs->Long.au64[0] = 0;
8377 pDescSs->Long.au64[1] = 0;
8378 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8379 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8380 pDescSs->Long.Gen.u2Dpl = uDpl;
8381 pDescSs->Long.Gen.u1Present = 1;
8382 pDescSs->Long.Gen.u1Long = 1;
8383}
8384
8385
8386/**
8387 * Marks the selector descriptor as accessed (only non-system descriptors).
8388 *
8389 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8390 * will therefore skip the limit checks.
8391 *
8392 * @returns Strict VBox status code.
8393 * @param pIemCpu The IEM per CPU.
8394 * @param uSel The selector.
8395 */
8396IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8397{
8398 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8399
8400 /*
8401 * Get the selector table base and calculate the entry address.
8402 */
8403 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8404 ? pCtx->ldtr.u64Base
8405 : pCtx->gdtr.pGdt;
8406 GCPtr += uSel & X86_SEL_MASK;
8407
8408 /*
8409 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8410 * ugly stuff to avoid that.  This also makes sure the access is atomic and
8411 * more or less removes any question about 8-bit vs 32-bit accesses.
8412 */
8413 VBOXSTRICTRC rcStrict;
8414 uint32_t volatile *pu32;
8415 if ((GCPtr & 3) == 0)
8416 {
8417 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8418 GCPtr += 2 + 2;
8419 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8420 if (rcStrict != VINF_SUCCESS)
8421 return rcStrict;
8422        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8423 }
8424 else
8425 {
8426 /* The misaligned GDT/LDT case, map the whole thing. */
8427 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8428 if (rcStrict != VINF_SUCCESS)
8429 return rcStrict;
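        /* The accessed bit is bit 40 of the 8-byte descriptor.  Step the pointer
           up to an aligned dword and recompute the bit index relative to it, so
           the atomic bit-set always operates on an aligned dword. */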
8430 switch ((uintptr_t)pu32 & 3)
8431 {
8432 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8433 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8434 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8435 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8436 }
8437 }
8438
8439 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8440}
8441
8442/** @} */
8443
8444
8445/*
8446 * Include the C/C++ implementation of instruction.
8447 */
8448#include "IEMAllCImpl.cpp.h"
8449
8450
8451
8452/** @name "Microcode" macros.
8453 *
8454 * The idea is that we should be able to use the same code to interpret
8455 * instructions as well as recompiler instructions. Thus this obfuscation.
8456 *
8457 * @{
8458 */
8459#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8460#define IEM_MC_END() }
8461#define IEM_MC_PAUSE() do {} while (0)
8462#define IEM_MC_CONTINUE() do {} while (0)
8463
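/*
 * Illustrative sketch of how a decoder function strings these macros together.
 * This is a made-up "push ax"-style body, not copied from IEMAllInstructions.cpp.h;
 * see that file for the real opcode tables.
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *     return VINF_SUCCESS;
 */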
8464/** Internal macro. */
8465#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8466 do \
8467 { \
8468 VBOXSTRICTRC rcStrict2 = a_Expr; \
8469 if (rcStrict2 != VINF_SUCCESS) \
8470 return rcStrict2; \
8471 } while (0)
8472
8473#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8474#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8475#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8476#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8477#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8478#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8479#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8480
8481#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8482#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8483 do { \
8484 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8485 return iemRaiseDeviceNotAvailable(pIemCpu); \
8486 } while (0)
8487#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8488 do { \
8489 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8490 return iemRaiseMathFault(pIemCpu); \
8491 } while (0)
8492#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8493 do { \
8494 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8495 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8496 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8497 return iemRaiseUndefinedOpcode(pIemCpu); \
8498 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8499 return iemRaiseDeviceNotAvailable(pIemCpu); \
8500 } while (0)
8501#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8502 do { \
8503 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8504 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8505 return iemRaiseUndefinedOpcode(pIemCpu); \
8506 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8507 return iemRaiseDeviceNotAvailable(pIemCpu); \
8508 } while (0)
8509#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8510 do { \
8511 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8512 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8513 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8514 return iemRaiseUndefinedOpcode(pIemCpu); \
8515 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8516 return iemRaiseDeviceNotAvailable(pIemCpu); \
8517 } while (0)
8518#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8519 do { \
8520 if (pIemCpu->uCpl != 0) \
8521 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8522 } while (0)
8523
8524
8525#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8526#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8527#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8528#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8529#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8530#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8531#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8532 uint32_t a_Name; \
8533 uint32_t *a_pName = &a_Name
8534#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8535 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8536
8537#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8538#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8539
8540#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8541#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8542#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8543#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8544#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8545#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8546#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8547#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8548#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8549#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8550#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8551#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8552#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8553#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8554#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8555#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8556#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8557#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8558#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8559#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8560#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8561#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8562#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8563#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8564#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8565#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8566#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8567#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8568#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8569/** @note Not for IOPL or IF testing or modification. */
8570#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8571#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8572#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8573#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8574
8575#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8576#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8577#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8578#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8579#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8580#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8581#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8582#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8583#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8584#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8585#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8586 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8587
8588#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8589#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8590/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8591 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8592#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8593#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8594/** @note Not for IOPL or IF testing or modification. */
8595#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8596
8597#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8598#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8599#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8600 do { \
8601 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8602 *pu32Reg += (a_u32Value); \
8603        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8604 } while (0)
8605#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8606
8607#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8608#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8609#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8610 do { \
8611 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8612 *pu32Reg -= (a_u32Value); \
8613        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8614 } while (0)
8615#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8616
8617#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8618#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8619#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8620#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8621#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8622#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8623#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8624
8625#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8626#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8627#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8628#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8629
8630#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8631#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8632#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8633
8634#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8635#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8636
8637#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8638#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8639#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8640
8641#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8642#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8643#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8644
8645#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8646
8647#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8648
8649#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8650#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8651#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8652 do { \
8653 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8654 *pu32Reg &= (a_u32Value); \
8655        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8656 } while (0)
8657#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8658
8659#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8660#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8661#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8662 do { \
8663 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8664 *pu32Reg |= (a_u32Value); \
8665        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8666 } while (0)
8667#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8668
8669
8670/** @note Not for IOPL or IF modification. */
8671#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8672/** @note Not for IOPL or IF modification. */
8673#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8674/** @note Not for IOPL or IF modification. */
8675#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8676
8677#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8678
8679
8680#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8681 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8682#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8683 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8684#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8685 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8686#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8687 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8688#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8689 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8690#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8691 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8692#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8693 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8694
8695#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8696 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8697#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8698 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8699#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8700 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8701#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8702 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8703#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8704 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8705 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8706 } while (0)
8707#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8708 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8709 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8710 } while (0)
8711#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8712 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8713#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8714 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8715#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8716 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8717
8718#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8720#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8721 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8722#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8724
8725#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8727#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8729#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8730 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8731
8732#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8734#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8736#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8738
8739#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8741
8742#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8744#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8746#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8748#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8749 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8750
8751#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8753#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8755#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8757
8758#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8760#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8761 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8762
8763
8764
8765#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8766 do { \
8767 uint8_t u8Tmp; \
8768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8769 (a_u16Dst) = u8Tmp; \
8770 } while (0)
8771#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8772 do { \
8773 uint8_t u8Tmp; \
8774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8775 (a_u32Dst) = u8Tmp; \
8776 } while (0)
8777#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8778 do { \
8779 uint8_t u8Tmp; \
8780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8781 (a_u64Dst) = u8Tmp; \
8782 } while (0)
8783#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8784 do { \
8785 uint16_t u16Tmp; \
8786 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8787 (a_u32Dst) = u16Tmp; \
8788 } while (0)
8789#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8790 do { \
8791 uint16_t u16Tmp; \
8792 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8793 (a_u64Dst) = u16Tmp; \
8794 } while (0)
8795#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8796 do { \
8797 uint32_t u32Tmp; \
8798 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8799 (a_u64Dst) = u32Tmp; \
8800 } while (0)
8801
8802#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8803 do { \
8804 uint8_t u8Tmp; \
8805 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8806 (a_u16Dst) = (int8_t)u8Tmp; \
8807 } while (0)
8808#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8809 do { \
8810 uint8_t u8Tmp; \
8811 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8812 (a_u32Dst) = (int8_t)u8Tmp; \
8813 } while (0)
8814#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8815 do { \
8816 uint8_t u8Tmp; \
8817 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8818 (a_u64Dst) = (int8_t)u8Tmp; \
8819 } while (0)
8820#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8821 do { \
8822 uint16_t u16Tmp; \
8823 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8824 (a_u32Dst) = (int16_t)u16Tmp; \
8825 } while (0)
8826#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8827 do { \
8828 uint16_t u16Tmp; \
8829 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8830 (a_u64Dst) = (int16_t)u16Tmp; \
8831 } while (0)
8832#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8833 do { \
8834 uint32_t u32Tmp; \
8835 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8836 (a_u64Dst) = (int32_t)u32Tmp; \
8837 } while (0)
8838
8839#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8840 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8841#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8842 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8843#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8844 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8845#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8846 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8847
8848#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8849 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8850#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8851 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8852#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8853 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8854#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8855 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8856
8857#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8858#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8859#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8860#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8861#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8862#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8863#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8864 do { \
8865 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8866 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8867 } while (0)
8868
8869#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8870 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8871#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8872 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8873
8874
8875#define IEM_MC_PUSH_U16(a_u16Value) \
8876 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8877#define IEM_MC_PUSH_U32(a_u32Value) \
8878 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8879#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8880 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8881#define IEM_MC_PUSH_U64(a_u64Value) \
8882 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8883
8884#define IEM_MC_POP_U16(a_pu16Value) \
8885 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8886#define IEM_MC_POP_U32(a_pu32Value) \
8887 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8888#define IEM_MC_POP_U64(a_pu64Value) \
8889 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8890
8891/** Maps guest memory for direct or bounce buffered access.
8892 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8893 * @remarks May return.
8894 */
8895#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8896 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8897
8898/** Maps guest memory for direct or bounce buffered access.
8899 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8900 * @remarks May return.
8901 */
8902#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8903 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8904
8905/** Commits the memory and unmaps the guest memory.
8906 * @remarks May return.
8907 */
8908#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8909 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8910
8911/** Commits the memory and unmaps the guest memory, unless the FPU status word
8912 * (@a a_u16FSW) together with the FPU control word indicates a pending unmasked
8913 * exception that would cause FLD not to store.
8914 *
8915 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8916 * store, while \#P will not.
8917 *
8918 * @remarks May in theory return - for now.
8919 */
8920#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8921 do { \
8922 if ( !(a_u16FSW & X86_FSW_ES) \
8923 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8924 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8925 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8926 } while (0)
8927
8928/** Calculate the effective address from R/M. */
8929#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8930 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8931
8932#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8933#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8934#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8935#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8936#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8937#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8938#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8939
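/*
 * Illustrative sketch of a read-modify-write memory operand using the map and
 * commit macros above.  This is a made-up "and r/m16, r16"-style case; bRm,
 * iGRegSrc, iemAImpl_and_u16 and the IEM_ACCESS_DATA_RW flag are taken as given
 * for the example, and the real opcode tables in IEMAllInstructions.cpp.h may
 * differ in detail.
 *
 *     IEM_MC_BEGIN(3, 2);
 *     IEM_MC_ARG(uint16_t *,      pu16Dst,         0);
 *     IEM_MC_ARG(uint16_t,        u16Src,          1);
 *     IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
 *     IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEM_MC_FETCH_GREG_U16(u16Src, iGRegSrc);
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_FETCH_EFLAGS(EFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_and_u16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *     IEM_MC_COMMIT_EFLAGS(EFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */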
8940/**
8941 * Defers the rest of the instruction emulation to a C implementation routine
8942 * and returns, only taking the standard parameters.
8943 *
8944 * @param a_pfnCImpl The pointer to the C routine.
8945 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8946 */
8947#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8948
8949/**
8950 * Defers the rest of instruction emulation to a C implementation routine and
8951 * returns, taking one argument in addition to the standard ones.
8952 *
8953 * @param a_pfnCImpl The pointer to the C routine.
8954 * @param a0 The argument.
8955 */
8956#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8957
8958/**
8959 * Defers the rest of the instruction emulation to a C implementation routine
8960 * and returns, taking two arguments in addition to the standard ones.
8961 *
8962 * @param a_pfnCImpl The pointer to the C routine.
8963 * @param a0 The first extra argument.
8964 * @param a1 The second extra argument.
8965 */
8966#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8967
8968/**
8969 * Defers the rest of the instruction emulation to a C implementation routine
8970 * and returns, taking three arguments in addition to the standard ones.
8971 *
8972 * @param a_pfnCImpl The pointer to the C routine.
8973 * @param a0 The first extra argument.
8974 * @param a1 The second extra argument.
8975 * @param a2 The third extra argument.
8976 */
8977#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8978
8979/**
8980 * Defers the rest of the instruction emulation to a C implementation routine
8981 * and returns, taking four arguments in addition to the standard ones.
8982 *
8983 * @param a_pfnCImpl The pointer to the C routine.
8984 * @param a0 The first extra argument.
8985 * @param a1 The second extra argument.
8986 * @param a2 The third extra argument.
8987 * @param a3 The fourth extra argument.
8988 */
8989#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8990
8991/**
8992 * Defers the rest of the instruction emulation to a C implementation routine
8993 * and returns, taking five arguments in addition to the standard ones.
8994 *
8995 * @param a_pfnCImpl The pointer to the C routine.
8996 * @param a0 The first extra argument.
8997 * @param a1 The second extra argument.
8998 * @param a2 The third extra argument.
8999 * @param a3 The fourth extra argument.
9000 * @param a4 The fifth extra argument.
9001 */
9002#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
9003
9004/**
9005 * Defers the entire instruction emulation to a C implementation routine and
9006 * returns, only taking the standard parameters.
9007 *
9008 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9009 *
9010 * @param a_pfnCImpl The pointer to the C routine.
9011 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
9012 */
9013#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
9014
9015/**
9016 * Defers the entire instruction emulation to a C implementation routine and
9017 * returns, taking one argument in addition to the standard ones.
9018 *
9019 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9020 *
9021 * @param a_pfnCImpl The pointer to the C routine.
9022 * @param a0 The argument.
9023 */
9024#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
9025
9026/**
9027 * Defers the entire instruction emulation to a C implementation routine and
9028 * returns, taking two arguments in addition to the standard ones.
9029 *
9030 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9031 *
9032 * @param a_pfnCImpl The pointer to the C routine.
9033 * @param a0 The first extra argument.
9034 * @param a1 The second extra argument.
9035 */
9036#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
9037
9038/**
9039 * Defers the entire instruction emulation to a C implementation routine and
9040 * returns, taking three arguments in addition to the standard ones.
9041 *
9042 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9043 *
9044 * @param a_pfnCImpl The pointer to the C routine.
9045 * @param a0 The first extra argument.
9046 * @param a1 The second extra argument.
9047 * @param a2 The third extra argument.
9048 */
9049#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9050
9051/**
9052 * Calls a FPU assembly implementation taking one visible argument.
9053 *
9054 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9055 * @param a0 The first extra argument.
9056 */
9057#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
9058 do { \
9059 iemFpuPrepareUsage(pIemCpu); \
9060 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9061 } while (0)
9062
9063/**
9064 * Calls a FPU assembly implementation taking two visible arguments.
9065 *
9066 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9067 * @param a0 The first extra argument.
9068 * @param a1 The second extra argument.
9069 */
9070#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9071 do { \
9072 iemFpuPrepareUsage(pIemCpu); \
9073 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9074 } while (0)
9075
9076/**
9077 * Calls a FPU assembly implementation taking three visible arguments.
9078 *
9079 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9080 * @param a0 The first extra argument.
9081 * @param a1 The second extra argument.
9082 * @param a2 The third extra argument.
9083 */
9084#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9085 do { \
9086 iemFpuPrepareUsage(pIemCpu); \
9087 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9088 } while (0)
9089
9090#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9091 do { \
9092 (a_FpuData).FSW = (a_FSW); \
9093 (a_FpuData).r80Result = *(a_pr80Value); \
9094 } while (0)
9095
9096/** Pushes FPU result onto the stack. */
9097#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9098 iemFpuPushResult(pIemCpu, &a_FpuData)
9099/** Pushes FPU result onto the stack and sets the FPUDP. */
9100#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9101 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9102
9103/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
9104#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9105 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9106
9107/** Stores FPU result in a stack register. */
9108#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9109 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9110/** Stores FPU result in a stack register and pops the stack. */
9111#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9112 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9113/** Stores FPU result in a stack register and sets the FPUDP. */
9114#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9115 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9116/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9117 * stack. */
9118#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9119 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9120
9121/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9122#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9123 iemFpuUpdateOpcodeAndIp(pIemCpu)
9124/** Free a stack register (for FFREE and FFREEP). */
9125#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9126 iemFpuStackFree(pIemCpu, a_iStReg)
9127/** Increment the FPU stack pointer. */
9128#define IEM_MC_FPU_STACK_INC_TOP() \
9129 iemFpuStackIncTop(pIemCpu)
9130/** Decrement the FPU stack pointer. */
9131#define IEM_MC_FPU_STACK_DEC_TOP() \
9132 iemFpuStackDecTop(pIemCpu)
9133
9134/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9135#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9136 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9137/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9138#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9139 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9140/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9141#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9142 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9143/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9144#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9145 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9146/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9147 * stack. */
9148#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9149 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9150/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9151#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9152 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9153
9154/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9155#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9156 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9157/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9158 * stack. */
9159#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9160 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9161/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9162 * FPUDS. */
9163#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9164 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9165/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9166 * FPUDS. Pops stack. */
9167#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9168 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9169/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9170 * stack twice. */
9171#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9172 iemFpuStackUnderflowThenPopPop(pIemCpu)
9173/** Raises a FPU stack underflow exception for an instruction pushing a result
9174 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9175#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9176 iemFpuStackPushUnderflow(pIemCpu)
9177/** Raises a FPU stack underflow exception for an instruction pushing a result
9178 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9179#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9180 iemFpuStackPushUnderflowTwo(pIemCpu)
9181
9182/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9183 * FPUIP, FPUCS and FOP. */
9184#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9185 iemFpuStackPushOverflow(pIemCpu)
9186/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9187 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9188#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9189 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9190/** Indicates that we (might) have modified the FPU state. */
9191#define IEM_MC_USED_FPU() \
9192 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9193
9194/**
9195 * Calls a MMX assembly implementation taking two visible arguments.
9196 *
9197 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9198 * @param a0 The first extra argument.
9199 * @param a1 The second extra argument.
9200 */
9201#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9202 do { \
9203 iemFpuPrepareUsage(pIemCpu); \
9204 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9205 } while (0)
9206
9207/**
9208 * Calls a MMX assembly implementation taking three visible arguments.
9209 *
9210 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9211 * @param a0 The first extra argument.
9212 * @param a1 The second extra argument.
9213 * @param a2 The third extra argument.
9214 */
9215#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9216 do { \
9217 iemFpuPrepareUsage(pIemCpu); \
9218 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9219 } while (0)
9220
9221
9222/**
9223 * Calls a SSE assembly implementation taking two visible arguments.
9224 *
9225 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9226 * @param a0 The first extra argument.
9227 * @param a1 The second extra argument.
9228 */
9229#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9230 do { \
9231 iemFpuPrepareUsageSse(pIemCpu); \
9232 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9233 } while (0)
9234
9235/**
9236 * Calls a SSE assembly implementation taking three visible arguments.
9237 *
9238 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9239 * @param a0 The first extra argument.
9240 * @param a1 The second extra argument.
9241 * @param a2 The third extra argument.
9242 */
9243#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9244 do { \
9245 iemFpuPrepareUsageSse(pIemCpu); \
9246 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9247 } while (0)
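/* Editorial sketch, not code from this file: a decoder body would typically invoke
 these wrappers along the lines of
 IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_someOp_u128, pDst, pSrc);
 where iemAImpl_someOp_u128, pDst and pSrc are hypothetical names standing in for an
 assembly worker and two operand references fetched earlier in the same IEM_MC block.
 The value of the wrapper is that iemFpuPrepareUsageSse() runs before the worker, so
 the x87/XMM state passed as the hidden first argument is ready to be used. */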
9248
9249
9250/** @note Not for IOPL or IF testing. */
9251#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9252/** @note Not for IOPL or IF testing. */
9253#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9254/** @note Not for IOPL or IF testing. */
9255#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9256/** @note Not for IOPL or IF testing. */
9257#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9258/** @note Not for IOPL or IF testing. */
9259#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9260 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9261 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9262/** @note Not for IOPL or IF testing. */
9263#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9264 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9265 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9266/** @note Not for IOPL or IF testing. */
9267#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9268 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9269 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9270 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9271/** @note Not for IOPL or IF testing. */
9272#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9273 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9274 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9275 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9276#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9277#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9278#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9279/** @note Not for IOPL or IF testing. */
9280#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9281 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9282 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9283/** @note Not for IOPL or IF testing. */
9284#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9285 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9286 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9287/** @note Not for IOPL or IF testing. */
9288#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9289 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9290 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9291/** @note Not for IOPL or IF testing. */
9292#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9293 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9294 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9295/** @note Not for IOPL or IF testing. */
9296#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9297 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9298 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9299/** @note Not for IOPL or IF testing. */
9300#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9301 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9302 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9303#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9304#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9305#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9306 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9307#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9308 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9309#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9310 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9311#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9312 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9313#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9314 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9315#define IEM_MC_IF_FCW_IM() \
9316 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9317
9318#define IEM_MC_ELSE() } else {
9319#define IEM_MC_ENDIF() } do {} while (0)
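/* For reference: the IEM_MC_IF_* macros above simply open a plain C if-statement that
 IEM_MC_ELSE() and IEM_MC_ENDIF() close again. A sequence such as
 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) ... IEM_MC_ELSE() ... IEM_MC_ENDIF()
 therefore expands to
 if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (X86_EFL_ZF)) { ... } else { ... } do {} while (0)
 which is why every IEM_MC_IF_* must be paired with exactly one IEM_MC_ENDIF(). */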
9320
9321/** @} */
9322
9323
9324/** @name Opcode Debug Helpers.
9325 * @{
9326 */
9327#ifdef DEBUG
9328# define IEMOP_MNEMONIC(a_szMnemonic) \
9329 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9330 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9331# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9332 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9333 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9334#else
9335# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9336# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9337#endif
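/* Illustration only (selector, RIP and counter values are made up): with DEBUG
 defined, IEMOP_MNEMONIC2("add", "Eb,Gb") produces a level-4 log line roughly like
 decode - 0008:0000000080101234 add Eb,Gb [#42]
 with "lock " prepended to the mnemonic when a lock prefix was decoded. */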
9338
9339/** @} */
9340
9341
9342/** @name Opcode Helpers.
9343 * @{
9344 */
9345
9346/** The instruction raises an \#UD in real and V8086 mode. */
9347#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9348 do \
9349 { \
9350 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9351 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9352 } while (0)
9353
9354/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9355 * lock prefixed.
9356 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9357#define IEMOP_HLP_NO_LOCK_PREFIX() \
9358 do \
9359 { \
9360 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9361 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9362 } while (0)
9363
9364/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9365 * 64-bit mode. */
9366#define IEMOP_HLP_NO_64BIT() \
9367 do \
9368 { \
9369 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9370 return IEMOP_RAISE_INVALID_OPCODE(); \
9371 } while (0)
9372
9373/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9374 * 64-bit mode. */
9375#define IEMOP_HLP_ONLY_64BIT() \
9376 do \
9377 { \
9378 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9379 return IEMOP_RAISE_INVALID_OPCODE(); \
9380 } while (0)
9381
9382/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9383#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9384 do \
9385 { \
9386 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9387 iemRecalEffOpSize64Default(pIemCpu); \
9388 } while (0)
9389
9390/** The instruction has 64-bit operand size if 64-bit mode. */
9391#define IEMOP_HLP_64BIT_OP_SIZE() \
9392 do \
9393 { \
9394 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9395 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9396 } while (0)
9397
9398/** Only a REX prefix immediately preceding the first opcode byte takes
9399 * effect. This macro helps ensure this as well as log bad guest code. */
9400#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9401 do \
9402 { \
9403 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9404 { \
9405 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9406 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9407 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9408 pIemCpu->uRexB = 0; \
9409 pIemCpu->uRexIndex = 0; \
9410 pIemCpu->uRexReg = 0; \
9411 iemRecalEffOpSize(pIemCpu); \
9412 } \
9413 } while (0)
9414
9415/**
9416 * Done decoding.
9417 */
9418#define IEMOP_HLP_DONE_DECODING() \
9419 do \
9420 { \
9421 /*nothing for now, maybe later... */ \
9422 } while (0)
9423
9424/**
9425 * Done decoding, raise \#UD exception if lock prefix present.
9426 */
9427#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9428 do \
9429 { \
9430 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9431 { /* likely */ } \
9432 else \
9433 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9434 } while (0)
9435#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9436 do \
9437 { \
9438 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9439 { /* likely */ } \
9440 else \
9441 { \
9442 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9443 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9444 } \
9445 } while (0)
9446#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9447 do \
9448 { \
9449 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9450 { /* likely */ } \
9451 else \
9452 { \
9453 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9454 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9455 } \
9456 } while (0)
9457/**
9458 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9459 * are present.
9460 */
9461#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9462 do \
9463 { \
9464 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9465 { /* likely */ } \
9466 else \
9467 return IEMOP_RAISE_INVALID_OPCODE(); \
9468 } while (0)
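/* Hypothetical sketch, not code from this file: an opcode decoder function (they live
 in IEMAllInstructions.cpp.h, included below) would typically use the helpers above
 right after parsing its prefixes and ModRM byte, e.g.
 FNIEMOP_DEF(iemOp_example)
 {
 IEMOP_MNEMONIC("example");
 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 ... IEM_MC block doing the actual work ...
 }
 so that an invalid lock prefix is rejected with #UD before any guest state is touched. */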
9469
9470
9471/**
9472 * Calculates the effective address of a ModR/M memory operand.
9473 *
9474 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9475 *
9476 * @return Strict VBox status code.
9477 * @param pIemCpu The IEM per CPU data.
9478 * @param bRm The ModRM byte.
9479 * @param cbImm The size of any immediate following the
9480 * effective address opcode bytes. Important for
9481 * RIP relative addressing.
9482 * @param pGCPtrEff Where to return the effective address.
9483 */
9484IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9485{
9486 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9487 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9488#define SET_SS_DEF() \
9489 do \
9490 { \
9491 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9492 pIemCpu->iEffSeg = X86_SREG_SS; \
9493 } while (0)
9494
9495 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9496 {
9497/** @todo Check the effective address size crap! */
9498 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9499 {
9500 uint16_t u16EffAddr;
9501
9502 /* Handle the disp16 form with no registers first. */
9503 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9504 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9505 else
9506 {
9507 /* Get the displacement. */
9508 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9509 {
9510 case 0: u16EffAddr = 0; break;
9511 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9512 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9513 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9514 }
9515
9516 /* Add the base and index registers to the disp. */
9517 switch (bRm & X86_MODRM_RM_MASK)
9518 {
9519 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9520 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9521 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9522 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9523 case 4: u16EffAddr += pCtx->si; break;
9524 case 5: u16EffAddr += pCtx->di; break;
9525 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9526 case 7: u16EffAddr += pCtx->bx; break;
9527 }
9528 }
9529
9530 *pGCPtrEff = u16EffAddr;
9531 }
9532 else
9533 {
9534 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9535 uint32_t u32EffAddr;
9536
9537 /* Handle the disp32 form with no registers first. */
9538 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9539 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9540 else
9541 {
9542 /* Get the register (or SIB) value. */
9543 switch ((bRm & X86_MODRM_RM_MASK))
9544 {
9545 case 0: u32EffAddr = pCtx->eax; break;
9546 case 1: u32EffAddr = pCtx->ecx; break;
9547 case 2: u32EffAddr = pCtx->edx; break;
9548 case 3: u32EffAddr = pCtx->ebx; break;
9549 case 4: /* SIB */
9550 {
9551 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9552
9553 /* Get the index and scale it. */
9554 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9555 {
9556 case 0: u32EffAddr = pCtx->eax; break;
9557 case 1: u32EffAddr = pCtx->ecx; break;
9558 case 2: u32EffAddr = pCtx->edx; break;
9559 case 3: u32EffAddr = pCtx->ebx; break;
9560 case 4: u32EffAddr = 0; /*none */ break;
9561 case 5: u32EffAddr = pCtx->ebp; break;
9562 case 6: u32EffAddr = pCtx->esi; break;
9563 case 7: u32EffAddr = pCtx->edi; break;
9564 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9565 }
9566 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9567
9568 /* add base */
9569 switch (bSib & X86_SIB_BASE_MASK)
9570 {
9571 case 0: u32EffAddr += pCtx->eax; break;
9572 case 1: u32EffAddr += pCtx->ecx; break;
9573 case 2: u32EffAddr += pCtx->edx; break;
9574 case 3: u32EffAddr += pCtx->ebx; break;
9575 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9576 case 5:
9577 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9578 {
9579 u32EffAddr += pCtx->ebp;
9580 SET_SS_DEF();
9581 }
9582 else
9583 {
9584 uint32_t u32Disp;
9585 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9586 u32EffAddr += u32Disp;
9587 }
9588 break;
9589 case 6: u32EffAddr += pCtx->esi; break;
9590 case 7: u32EffAddr += pCtx->edi; break;
9591 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9592 }
9593 break;
9594 }
9595 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9596 case 6: u32EffAddr = pCtx->esi; break;
9597 case 7: u32EffAddr = pCtx->edi; break;
9598 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9599 }
9600
9601 /* Get and add the displacement. */
9602 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9603 {
9604 case 0:
9605 break;
9606 case 1:
9607 {
9608 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9609 u32EffAddr += i8Disp;
9610 break;
9611 }
9612 case 2:
9613 {
9614 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9615 u32EffAddr += u32Disp;
9616 break;
9617 }
9618 default:
9619 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9620 }
9621
9622 }
9623 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9624 *pGCPtrEff = u32EffAddr;
9625 else
9626 {
9627 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9628 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9629 }
9630 }
9631 }
9632 else
9633 {
9634 uint64_t u64EffAddr;
9635
9636 /* Handle the rip+disp32 form with no registers first. */
9637 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9638 {
9639 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9640 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9641 }
9642 else
9643 {
9644 /* Get the register (or SIB) value. */
9645 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9646 {
9647 case 0: u64EffAddr = pCtx->rax; break;
9648 case 1: u64EffAddr = pCtx->rcx; break;
9649 case 2: u64EffAddr = pCtx->rdx; break;
9650 case 3: u64EffAddr = pCtx->rbx; break;
9651 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9652 case 6: u64EffAddr = pCtx->rsi; break;
9653 case 7: u64EffAddr = pCtx->rdi; break;
9654 case 8: u64EffAddr = pCtx->r8; break;
9655 case 9: u64EffAddr = pCtx->r9; break;
9656 case 10: u64EffAddr = pCtx->r10; break;
9657 case 11: u64EffAddr = pCtx->r11; break;
9658 case 13: u64EffAddr = pCtx->r13; break;
9659 case 14: u64EffAddr = pCtx->r14; break;
9660 case 15: u64EffAddr = pCtx->r15; break;
9661 /* SIB */
9662 case 4:
9663 case 12:
9664 {
9665 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9666
9667 /* Get the index and scale it. */
9668 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9669 {
9670 case 0: u64EffAddr = pCtx->rax; break;
9671 case 1: u64EffAddr = pCtx->rcx; break;
9672 case 2: u64EffAddr = pCtx->rdx; break;
9673 case 3: u64EffAddr = pCtx->rbx; break;
9674 case 4: u64EffAddr = 0; /*none */ break;
9675 case 5: u64EffAddr = pCtx->rbp; break;
9676 case 6: u64EffAddr = pCtx->rsi; break;
9677 case 7: u64EffAddr = pCtx->rdi; break;
9678 case 8: u64EffAddr = pCtx->r8; break;
9679 case 9: u64EffAddr = pCtx->r9; break;
9680 case 10: u64EffAddr = pCtx->r10; break;
9681 case 11: u64EffAddr = pCtx->r11; break;
9682 case 12: u64EffAddr = pCtx->r12; break;
9683 case 13: u64EffAddr = pCtx->r13; break;
9684 case 14: u64EffAddr = pCtx->r14; break;
9685 case 15: u64EffAddr = pCtx->r15; break;
9686 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9687 }
9688 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9689
9690 /* add base */
9691 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9692 {
9693 case 0: u64EffAddr += pCtx->rax; break;
9694 case 1: u64EffAddr += pCtx->rcx; break;
9695 case 2: u64EffAddr += pCtx->rdx; break;
9696 case 3: u64EffAddr += pCtx->rbx; break;
9697 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9698 case 6: u64EffAddr += pCtx->rsi; break;
9699 case 7: u64EffAddr += pCtx->rdi; break;
9700 case 8: u64EffAddr += pCtx->r8; break;
9701 case 9: u64EffAddr += pCtx->r9; break;
9702 case 10: u64EffAddr += pCtx->r10; break;
9703 case 11: u64EffAddr += pCtx->r11; break;
9704 case 12: u64EffAddr += pCtx->r12; break;
9705 case 14: u64EffAddr += pCtx->r14; break;
9706 case 15: u64EffAddr += pCtx->r15; break;
9707 /* complicated encodings */
9708 case 5:
9709 case 13:
9710 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9711 {
9712 if (!pIemCpu->uRexB)
9713 {
9714 u64EffAddr += pCtx->rbp;
9715 SET_SS_DEF();
9716 }
9717 else
9718 u64EffAddr += pCtx->r13;
9719 }
9720 else
9721 {
9722 uint32_t u32Disp;
9723 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9724 u64EffAddr += (int32_t)u32Disp;
9725 }
9726 break;
9727 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9728 }
9729 break;
9730 }
9731 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9732 }
9733
9734 /* Get and add the displacement. */
9735 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9736 {
9737 case 0:
9738 break;
9739 case 1:
9740 {
9741 int8_t i8Disp;
9742 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9743 u64EffAddr += i8Disp;
9744 break;
9745 }
9746 case 2:
9747 {
9748 uint32_t u32Disp;
9749 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9750 u64EffAddr += (int32_t)u32Disp;
9751 break;
9752 }
9753 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9754 }
9755
9756 }
9757
9758 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9759 *pGCPtrEff = u64EffAddr;
9760 else
9761 {
9762 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9763 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9764 }
9765 }
9766
9767 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9768 return VINF_SUCCESS;
9769}
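/* Worked example: with a 16-bit effective address size, bRm=0x46 is mod=01 rm=110,
 i.e. disp8[bp]; the code above sign-extends the disp8, adds pCtx->bp and, via
 SET_SS_DEF(), makes SS the default segment unless a segment prefix overrides it.
 In 64-bit mode the mod=00 rm=101 pattern instead selects RIP-relative addressing,
 using the displacement plus the address of the following instruction
 (pCtx->rip + offOpcode + cbImm). */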
9770
9771/** @} */
9772
9773
9774
9775/*
9776 * Include the instructions
9777 */
9778#include "IEMAllInstructions.cpp.h"
9779
9780
9781
9782
9783#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9784
9785/**
9786 * Sets up execution verification mode.
9787 */
9788IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9789{
9790 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9791 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9792
9793 /*
9794 * Always note down the address of the current instruction.
9795 */
9796 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9797 pIemCpu->uOldRip = pOrgCtx->rip;
9798
9799 /*
9800 * Enable verification and/or logging.
9801 */
9802 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9803 if ( fNewNoRem
9804 && ( 0
9805#if 0 /* auto enable on first paged protected mode interrupt */
9806 || ( pOrgCtx->eflags.Bits.u1IF
9807 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9808 && TRPMHasTrap(pVCpu)
9809 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9810#endif
9811#if 0
9812 || ( pOrgCtx->cs.Sel == 0x10
9813 && ( pOrgCtx->rip == 0x90119e3e
9814 || pOrgCtx->rip == 0x901d9810) )
9815#endif
9816#if 0 /* Auto enable DSL - FPU stuff. */
9817 || ( pOrgCtx->cs.Sel == 0x10
9818 && (// pOrgCtx->rip == 0xc02ec07f
9819 //|| pOrgCtx->rip == 0xc02ec082
9820 //|| pOrgCtx->rip == 0xc02ec0c9
9821 0
9822 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9823#endif
9824#if 0 /* Auto enable DSL - fstp st0 stuff. */
9825 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9826#endif
9827#if 0
9828 || pOrgCtx->rip == 0x9022bb3a
9829#endif
9830#if 0
9831 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9832#endif
9833#if 0
9834 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9835 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9836#endif
9837#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9838 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9839 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9840 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9841#endif
9842#if 0 /* NT4SP1 - xadd early boot. */
9843 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9844#endif
9845#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9846 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9847#endif
9848#if 0 /* NT4SP1 - cmpxchg (AMD). */
9849 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9850#endif
9851#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9852 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9853#endif
9854#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9855 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9856
9857#endif
9858#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9859 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9860
9861#endif
9862#if 0 /* NT4SP1 - frstor [ecx] */
9863 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9864#endif
9865#if 0 /* xxxxxx - All long mode code. */
9866 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9867#endif
9868#if 0 /* rep movsq linux 3.7 64-bit boot. */
9869 || (pOrgCtx->rip == 0x0000000000100241)
9870#endif
9871#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9872 || (pOrgCtx->rip == 0x000000000215e240)
9873#endif
9874#if 0 /* DOS's size-overridden iret to v8086. */
9875 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9876#endif
9877 )
9878 )
9879 {
9880 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9881 RTLogFlags(NULL, "enabled");
9882 fNewNoRem = false;
9883 }
9884 if (fNewNoRem != pIemCpu->fNoRem)
9885 {
9886 pIemCpu->fNoRem = fNewNoRem;
9887 if (!fNewNoRem)
9888 {
9889 LogAlways(("Enabling verification mode!\n"));
9890 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9891 }
9892 else
9893 LogAlways(("Disabling verification mode!\n"));
9894 }
9895
9896 /*
9897 * Switch state.
9898 */
9899 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9900 {
9901 static CPUMCTX s_DebugCtx; /* Ugly! */
9902
9903 s_DebugCtx = *pOrgCtx;
9904 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9905 }
9906
9907 /*
9908 * See if there is an interrupt pending in TRPM and inject it if we can.
9909 */
9910 pIemCpu->uInjectCpl = UINT8_MAX;
9911 if ( pOrgCtx->eflags.Bits.u1IF
9912 && TRPMHasTrap(pVCpu)
9913 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9914 {
9915 uint8_t u8TrapNo;
9916 TRPMEVENT enmType;
9917 RTGCUINT uErrCode;
9918 RTGCPTR uCr2;
9919 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9920 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9921 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9922 TRPMResetTrap(pVCpu);
9923 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9924 }
9925
9926 /*
9927 * Reset the counters.
9928 */
9929 pIemCpu->cIOReads = 0;
9930 pIemCpu->cIOWrites = 0;
9931 pIemCpu->fIgnoreRaxRdx = false;
9932 pIemCpu->fOverlappingMovs = false;
9933 pIemCpu->fProblematicMemory = false;
9934 pIemCpu->fUndefinedEFlags = 0;
9935
9936 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9937 {
9938 /*
9939 * Free all verification records.
9940 */
9941 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9942 pIemCpu->pIemEvtRecHead = NULL;
9943 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9944 do
9945 {
9946 while (pEvtRec)
9947 {
9948 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9949 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9950 pIemCpu->pFreeEvtRec = pEvtRec;
9951 pEvtRec = pNext;
9952 }
9953 pEvtRec = pIemCpu->pOtherEvtRecHead;
9954 pIemCpu->pOtherEvtRecHead = NULL;
9955 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9956 } while (pEvtRec);
9957 }
9958}
9959
9960
9961/**
9962 * Allocate an event record.
9963 * @returns Pointer to a record.
9964 */
9965IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9966{
9967 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9968 return NULL;
9969
9970 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9971 if (pEvtRec)
9972 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9973 else
9974 {
9975 if (!pIemCpu->ppIemEvtRecNext)
9976 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9977
9978 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9979 if (!pEvtRec)
9980 return NULL;
9981 }
9982 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9983 pEvtRec->pNext = NULL;
9984 return pEvtRec;
9985}
9986
9987
9988/**
9989 * IOMMMIORead notification.
9990 */
9991VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9992{
9993 PVMCPU pVCpu = VMMGetCpu(pVM);
9994 if (!pVCpu)
9995 return;
9996 PIEMCPU pIemCpu = &pVCpu->iem.s;
9997 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9998 if (!pEvtRec)
9999 return;
10000 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
10001 pEvtRec->u.RamRead.GCPhys = GCPhys;
10002 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
10003 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10004 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10005}
10006
10007
10008/**
10009 * IOMMMIOWrite notification.
10010 */
10011VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
10012{
10013 PVMCPU pVCpu = VMMGetCpu(pVM);
10014 if (!pVCpu)
10015 return;
10016 PIEMCPU pIemCpu = &pVCpu->iem.s;
10017 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10018 if (!pEvtRec)
10019 return;
10020 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
10021 pEvtRec->u.RamWrite.GCPhys = GCPhys;
10022 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
10023 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
10024 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
10025 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
10026 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
10027 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10028 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10029}
10030
10031
10032/**
10033 * IOMIOPortRead notification.
10034 */
10035VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
10036{
10037 PVMCPU pVCpu = VMMGetCpu(pVM);
10038 if (!pVCpu)
10039 return;
10040 PIEMCPU pIemCpu = &pVCpu->iem.s;
10041 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10042 if (!pEvtRec)
10043 return;
10044 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10045 pEvtRec->u.IOPortRead.Port = Port;
10046 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10047 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10048 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10049}
10050
10051/**
10052 * IOMIOPortWrite notification.
10053 */
10054VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10055{
10056 PVMCPU pVCpu = VMMGetCpu(pVM);
10057 if (!pVCpu)
10058 return;
10059 PIEMCPU pIemCpu = &pVCpu->iem.s;
10060 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10061 if (!pEvtRec)
10062 return;
10063 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10064 pEvtRec->u.IOPortWrite.Port = Port;
10065 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10066 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10067 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10068 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10069}
10070
10071
10072VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10073{
10074 PVMCPU pVCpu = VMMGetCpu(pVM);
10075 if (!pVCpu)
10076 return;
10077 PIEMCPU pIemCpu = &pVCpu->iem.s;
10078 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10079 if (!pEvtRec)
10080 return;
10081 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
10082 pEvtRec->u.IOPortStrRead.Port = Port;
10083 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
10084 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
10085 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10086 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10087}
10088
10089
10090VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10091{
10092 PVMCPU pVCpu = VMMGetCpu(pVM);
10093 if (!pVCpu)
10094 return;
10095 PIEMCPU pIemCpu = &pVCpu->iem.s;
10096 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10097 if (!pEvtRec)
10098 return;
10099 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
10100 pEvtRec->u.IOPortStrWrite.Port = Port;
10101 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
10102 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
10103 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10104 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10105}
10106
10107
10108/**
10109 * Fakes and records an I/O port read.
10110 *
10111 * @returns VINF_SUCCESS.
10112 * @param pIemCpu The IEM per CPU data.
10113 * @param Port The I/O port.
10114 * @param pu32Value Where to store the fake value.
10115 * @param cbValue The size of the access.
10116 */
10117IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10118{
10119 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10120 if (pEvtRec)
10121 {
10122 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10123 pEvtRec->u.IOPortRead.Port = Port;
10124 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10125 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10126 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10127 }
10128 pIemCpu->cIOReads++;
10129 *pu32Value = 0xcccccccc;
10130 return VINF_SUCCESS;
10131}
10132
10133
10134/**
10135 * Fakes and records an I/O port write.
10136 *
10137 * @returns VINF_SUCCESS.
10138 * @param pIemCpu The IEM per CPU data.
10139 * @param Port The I/O port.
10140 * @param u32Value The value being written.
10141 * @param cbValue The size of the access.
10142 */
10143IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10144{
10145 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10146 if (pEvtRec)
10147 {
10148 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10149 pEvtRec->u.IOPortWrite.Port = Port;
10150 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10151 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10152 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10153 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10154 }
10155 pIemCpu->cIOWrites++;
10156 return VINF_SUCCESS;
10157}
10158
10159
10160/**
10161 * Used to add extra details about a stub case.
10162 * @param pIemCpu The IEM per CPU state.
10163 */
10164IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10165{
10166 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10167 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10168 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10169 char szRegs[4096];
10170 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10171 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10172 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10173 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10174 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10175 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10176 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10177 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10178 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10179 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10180 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10181 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10182 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10183 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10184 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10185 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10186 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10187 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10188 " efer=%016VR{efer}\n"
10189 " pat=%016VR{pat}\n"
10190 " sf_mask=%016VR{sf_mask}\n"
10191 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10192 " lstar=%016VR{lstar}\n"
10193 " star=%016VR{star} cstar=%016VR{cstar}\n"
10194 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10195 );
10196
10197 char szInstr1[256];
10198 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10199 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10200 szInstr1, sizeof(szInstr1), NULL);
10201 char szInstr2[256];
10202 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10203 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10204 szInstr2, sizeof(szInstr2), NULL);
10205
10206 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10207}
10208
10209
10210/**
10211 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10212 * dump to the assertion info.
10213 *
10214 * @param pEvtRec The record to dump.
10215 */
10216IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10217{
10218 switch (pEvtRec->enmEvent)
10219 {
10220 case IEMVERIFYEVENT_IOPORT_READ:
10221 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10222 pEvtRec->u.IOPortWrite.Port,
10223 pEvtRec->u.IOPortWrite.cbValue);
10224 break;
10225 case IEMVERIFYEVENT_IOPORT_WRITE:
10226 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10227 pEvtRec->u.IOPortWrite.Port,
10228 pEvtRec->u.IOPortWrite.cbValue,
10229 pEvtRec->u.IOPortWrite.u32Value);
10230 break;
10231 case IEMVERIFYEVENT_IOPORT_STR_READ:
10232 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
10233 pEvtRec->u.IOPortStrRead.Port,
10234 pEvtRec->u.IOPortStrRead.cbValue,
10235 pEvtRec->u.IOPortStrRead.cTransfers);
10236 break;
10237 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10238 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
10239 pEvtRec->u.IOPortStrWrite.Port,
10240 pEvtRec->u.IOPortStrWrite.cbValue,
10241 pEvtRec->u.IOPortStrWrite.cTransfers);
10242 break;
10243 case IEMVERIFYEVENT_RAM_READ:
10244 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10245 pEvtRec->u.RamRead.GCPhys,
10246 pEvtRec->u.RamRead.cb);
10247 break;
10248 case IEMVERIFYEVENT_RAM_WRITE:
10249 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10250 pEvtRec->u.RamWrite.GCPhys,
10251 pEvtRec->u.RamWrite.cb,
10252 (int)pEvtRec->u.RamWrite.cb,
10253 pEvtRec->u.RamWrite.ab);
10254 break;
10255 default:
10256 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10257 break;
10258 }
10259}
10260
10261
10262/**
10263 * Raises an assertion on the specified records, showing the given message with
10264 * a record dump attached.
10265 *
10266 * @param pIemCpu The IEM per CPU data.
10267 * @param pEvtRec1 The first record.
10268 * @param pEvtRec2 The second record.
10269 * @param pszMsg The message explaining why we're asserting.
10270 */
10271IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10272{
10273 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10274 iemVerifyAssertAddRecordDump(pEvtRec1);
10275 iemVerifyAssertAddRecordDump(pEvtRec2);
10276 iemVerifyAssertMsg2(pIemCpu);
10277 RTAssertPanic();
10278}
10279
10280
10281/**
10282 * Raises an assertion on the specified record, showing the given message with
10283 * a record dump attached.
10284 *
10285 * @param pIemCpu The IEM per CPU data.
10286 * @param pEvtRec The record.
10287 * @param pszMsg The message explaining why we're asserting.
10288 */
10289IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10290{
10291 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10292 iemVerifyAssertAddRecordDump(pEvtRec);
10293 iemVerifyAssertMsg2(pIemCpu);
10294 RTAssertPanic();
10295}
10296
10297
10298/**
10299 * Verifies a write record.
10300 *
10301 * @param pIemCpu The IEM per CPU data.
10302 * @param pEvtRec The write record.
10303 * @param fRem Set if REM was the other execution engine. If clear
10304 * it was HM.
10305 */
10306IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10307{
10308 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10309 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10310 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10311 if ( RT_FAILURE(rc)
10312 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10313 {
10314 /* fend off ins */
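 /* (iemVerifyFakeIOPortRead returns 0xcccccccc, so a 1/2/4 byte write consisting of
 0xcc bytes right after an I/O port read is most likely an INS storing the faked
 value; such writes cannot match real RAM and are skipped here.) */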
10315 if ( !pIemCpu->cIOReads
10316 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10317 || ( pEvtRec->u.RamWrite.cb != 1
10318 && pEvtRec->u.RamWrite.cb != 2
10319 && pEvtRec->u.RamWrite.cb != 4) )
10320 {
10321 /* fend off ROMs and MMIO */
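 /* (The unsigned subtractions are range checks: writes into roughly 0xa0000-0xfffff,
 the VGA/BIOS area, and 0xfffc0000-0xffffffff, the ROM at the top of 4GB, are
 expected to differ and are skipped.) */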
10322 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10323 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10324 {
10325 /* fend off fxsave */
10326 if (pEvtRec->u.RamWrite.cb != 512)
10327 {
10328 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10329 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10330 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10331 RTAssertMsg2Add("%s: %.*Rhxs\n"
10332 "iem: %.*Rhxs\n",
10333 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10334 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10335 iemVerifyAssertAddRecordDump(pEvtRec);
10336 iemVerifyAssertMsg2(pIemCpu);
10337 RTAssertPanic();
10338 }
10339 }
10340 }
10341 }
10342
10343}
10344
10345/**
10346 * Performs the post-execution verification checks.
10347 */
10348IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10349{
10350 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10351 return;
10352
10353 /*
10354 * Switch back the state.
10355 */
10356 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10357 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10358 Assert(pOrgCtx != pDebugCtx);
10359 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10360
10361 /*
10362 * Execute the instruction in REM.
10363 */
10364 bool fRem = false;
10365 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10366 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10367 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10368#ifdef IEM_VERIFICATION_MODE_FULL_HM
10369 if ( HMIsEnabled(pVM)
10370 && pIemCpu->cIOReads == 0
10371 && pIemCpu->cIOWrites == 0
10372 && !pIemCpu->fProblematicMemory)
10373 {
10374 uint64_t uStartRip = pOrgCtx->rip;
10375 unsigned iLoops = 0;
10376 do
10377 {
10378 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10379 iLoops++;
10380 } while ( rc == VINF_SUCCESS
10381 || ( rc == VINF_EM_DBG_STEPPED
10382 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10383 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10384 || ( pOrgCtx->rip != pDebugCtx->rip
10385 && pIemCpu->uInjectCpl != UINT8_MAX
10386 && iLoops < 8) );
10387 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10388 rc = VINF_SUCCESS;
10389 }
10390#endif
10391 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10392 || rc == VINF_IOM_R3_IOPORT_READ
10393 || rc == VINF_IOM_R3_IOPORT_WRITE
10394 || rc == VINF_IOM_R3_MMIO_READ
10395 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10396 || rc == VINF_IOM_R3_MMIO_WRITE
10397 || rc == VINF_CPUM_R3_MSR_READ
10398 || rc == VINF_CPUM_R3_MSR_WRITE
10399 || rc == VINF_EM_RESCHEDULE
10400 )
10401 {
10402 EMRemLock(pVM);
10403 rc = REMR3EmulateInstruction(pVM, pVCpu);
10404 AssertRC(rc);
10405 EMRemUnlock(pVM);
10406 fRem = true;
10407 }
10408
10409 /*
10410 * Compare the register states.
10411 */
10412 unsigned cDiffs = 0;
10413 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10414 {
10415 //Log(("REM and IEM ends up with different registers!\n"));
10416 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10417
10418# define CHECK_FIELD(a_Field) \
10419 do \
10420 { \
10421 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10422 { \
10423 switch (sizeof(pOrgCtx->a_Field)) \
10424 { \
10425 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10426 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10427 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10428 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10429 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10430 } \
10431 cDiffs++; \
10432 } \
10433 } while (0)
10434# define CHECK_XSTATE_FIELD(a_Field) \
10435 do \
10436 { \
10437 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10438 { \
10439 switch (sizeof(pOrgXState->a_Field)) \
10440 { \
10441 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10442 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10443 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10444 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10445 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10446 } \
10447 cDiffs++; \
10448 } \
10449 } while (0)
10450
10451# define CHECK_BIT_FIELD(a_Field) \
10452 do \
10453 { \
10454 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10455 { \
10456 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10457 cDiffs++; \
10458 } \
10459 } while (0)
10460
10461# define CHECK_SEL(a_Sel) \
10462 do \
10463 { \
10464 CHECK_FIELD(a_Sel.Sel); \
10465 CHECK_FIELD(a_Sel.Attr.u); \
10466 CHECK_FIELD(a_Sel.u64Base); \
10467 CHECK_FIELD(a_Sel.u32Limit); \
10468 CHECK_FIELD(a_Sel.fFlags); \
10469 } while (0)
10470
10471 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10472 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10473
10474#if 1 /* The recompiler doesn't update these the intel way. */
10475 if (fRem)
10476 {
10477 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10478 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10479 pOrgXState->x87.CS = pDebugXState->x87.CS;
10480 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10481 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10482 pOrgXState->x87.DS = pDebugXState->x87.DS;
10483 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10484 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10485 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10486 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10487 }
10488#endif
10489 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10490 {
10491 RTAssertMsg2Weak(" the FPU state differs\n");
10492 cDiffs++;
10493 CHECK_XSTATE_FIELD(x87.FCW);
10494 CHECK_XSTATE_FIELD(x87.FSW);
10495 CHECK_XSTATE_FIELD(x87.FTW);
10496 CHECK_XSTATE_FIELD(x87.FOP);
10497 CHECK_XSTATE_FIELD(x87.FPUIP);
10498 CHECK_XSTATE_FIELD(x87.CS);
10499 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10500 CHECK_XSTATE_FIELD(x87.FPUDP);
10501 CHECK_XSTATE_FIELD(x87.DS);
10502 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10503 CHECK_XSTATE_FIELD(x87.MXCSR);
10504 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10505 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10506 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10507 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10508 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10509 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10510 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10511 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10512 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10513 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10514 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10515 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10516 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10517 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10518 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10519 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10520 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10521 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10522 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10523 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10524 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10525 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10526 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10527 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10528 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10529 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10530 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10531 }
10532 CHECK_FIELD(rip);
10533 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10534 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10535 {
10536 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10537 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10538 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10539 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10540 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10541 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10542 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10543 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10544 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10545 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10546 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10547 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10548 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10549 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10550 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10551 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10552 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
10553 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10554 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10555 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10556 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10557 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10558 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10559 }
10560
10561 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10562 CHECK_FIELD(rax);
10563 CHECK_FIELD(rcx);
10564 if (!pIemCpu->fIgnoreRaxRdx)
10565 CHECK_FIELD(rdx);
10566 CHECK_FIELD(rbx);
10567 CHECK_FIELD(rsp);
10568 CHECK_FIELD(rbp);
10569 CHECK_FIELD(rsi);
10570 CHECK_FIELD(rdi);
10571 CHECK_FIELD(r8);
10572 CHECK_FIELD(r9);
10573 CHECK_FIELD(r10);
10574 CHECK_FIELD(r11);
10575 CHECK_FIELD(r12);
10576 CHECK_FIELD(r13);
10577 CHECK_SEL(cs);
10578 CHECK_SEL(ss);
10579 CHECK_SEL(ds);
10580 CHECK_SEL(es);
10581 CHECK_SEL(fs);
10582 CHECK_SEL(gs);
10583 CHECK_FIELD(cr0);
10584
10585 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10586 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10587 /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
10588 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10589 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10590 {
10591 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10592 { /* ignore */ }
10593 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10594 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10595 && fRem)
10596 { /* ignore */ }
10597 else
10598 CHECK_FIELD(cr2);
10599 }
10600 CHECK_FIELD(cr3);
10601 CHECK_FIELD(cr4);
10602 CHECK_FIELD(dr[0]);
10603 CHECK_FIELD(dr[1]);
10604 CHECK_FIELD(dr[2]);
10605 CHECK_FIELD(dr[3]);
10606 CHECK_FIELD(dr[6]);
10607 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10608 CHECK_FIELD(dr[7]);
10609 CHECK_FIELD(gdtr.cbGdt);
10610 CHECK_FIELD(gdtr.pGdt);
10611 CHECK_FIELD(idtr.cbIdt);
10612 CHECK_FIELD(idtr.pIdt);
10613 CHECK_SEL(ldtr);
10614 CHECK_SEL(tr);
10615 CHECK_FIELD(SysEnter.cs);
10616 CHECK_FIELD(SysEnter.eip);
10617 CHECK_FIELD(SysEnter.esp);
10618 CHECK_FIELD(msrEFER);
10619 CHECK_FIELD(msrSTAR);
10620 CHECK_FIELD(msrPAT);
10621 CHECK_FIELD(msrLSTAR);
10622 CHECK_FIELD(msrCSTAR);
10623 CHECK_FIELD(msrSFMASK);
10624 CHECK_FIELD(msrKERNELGSBASE);
10625
10626 if (cDiffs != 0)
10627 {
10628 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10629 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10630 iemVerifyAssertMsg2(pIemCpu);
10631 RTAssertPanic();
10632 }
10633# undef CHECK_FIELD
10634# undef CHECK_BIT_FIELD
10635 }
10636
10637 /*
10638 * If the register state compared fine, check the verification event
10639 * records.
10640 */
10641 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10642 {
10643 /*
10644 * Compare verification event records.
10645 * - I/O port accesses should be a 1:1 match.
10646 */
10647 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10648 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10649 while (pIemRec && pOtherRec)
10650 {
10651 /* Since we might miss RAM writes and reads, ignore reads and check
10652 that any extra IEM write records match what is actually in guest RAM. */
10653 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10654 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10655 && pIemRec->pNext)
10656 {
10657 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10658 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10659 pIemRec = pIemRec->pNext;
10660 }
10661
10662 /* Do the compare. */
10663 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10664 {
10665 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10666 break;
10667 }
10668 bool fEquals;
10669 switch (pIemRec->enmEvent)
10670 {
10671 case IEMVERIFYEVENT_IOPORT_READ:
10672 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10673 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10674 break;
10675 case IEMVERIFYEVENT_IOPORT_WRITE:
10676 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10677 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10678 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10679 break;
10680 case IEMVERIFYEVENT_IOPORT_STR_READ:
10681 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
10682 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
10683 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
10684 break;
10685 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10686 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
10687 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
10688 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
10689 break;
10690 case IEMVERIFYEVENT_RAM_READ:
10691 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10692 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10693 break;
10694 case IEMVERIFYEVENT_RAM_WRITE:
10695 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10696 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10697 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10698 break;
10699 default:
10700 fEquals = false;
10701 break;
10702 }
10703 if (!fEquals)
10704 {
10705 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10706 break;
10707 }
10708
10709 /* advance */
10710 pIemRec = pIemRec->pNext;
10711 pOtherRec = pOtherRec->pNext;
10712 }
10713
10714 /* Ignore extra writes and reads. */
10715 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10716 {
10717 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10718 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10719 pIemRec = pIemRec->pNext;
10720 }
10721 if (pIemRec != NULL)
10722 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10723 else if (pOtherRec != NULL)
10724 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10725 }
10726 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10727}
10728
10729#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10730
10731/* stubs */
10732IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10733{
10734 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10735 return VERR_INTERNAL_ERROR;
10736}
10737
10738IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10739{
10740 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10741 return VERR_INTERNAL_ERROR;
10742}
10743
10744#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10745
10746
10747#ifdef LOG_ENABLED
10748/**
10749 * Logs the current instruction.
10750 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10751 * @param pCtx The current CPU context.
10752 * @param fSameCtx Set if we have the same context information as the VMM,
10753 * clear if we may have already executed an instruction in
10754 * our debug context. When clear, we assume IEMCPU holds
10755 * valid CPU mode info.
10756 */
10757IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10758{
10759# ifdef IN_RING3
10760 if (LogIs2Enabled())
10761 {
10762 char szInstr[256];
10763 uint32_t cbInstr = 0;
10764 if (fSameCtx)
10765 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10766 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10767 szInstr, sizeof(szInstr), &cbInstr);
10768 else
10769 {
10770 uint32_t fFlags = 0;
10771 switch (pVCpu->iem.s.enmCpuMode)
10772 {
10773 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10774 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10775 case IEMMODE_16BIT:
10776 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10777 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10778 else
10779 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10780 break;
10781 }
10782 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10783 szInstr, sizeof(szInstr), &cbInstr);
10784 }
10785
10786 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10787 Log2(("****\n"
10788 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10789 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10790 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10791 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10792 " %s\n"
10793 ,
10794 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10795 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10796 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10797 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10798 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10799 szInstr));
10800
10801 if (LogIs3Enabled())
10802 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10803 }
10804 else
10805# endif
10806 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10807 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10808}
10809#endif
10810
10811
10812/**
10813 * Makes status code adjustments (pass up from I/O and access handler)
10814 * and maintains statistics.
10815 *
10816 * @returns Strict VBox status code to pass up.
10817 * @param pIemCpu The IEM per CPU data.
10818 * @param rcStrict The status from executing an instruction.
10819 */
10820DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10821{
10822 if (rcStrict != VINF_SUCCESS)
10823 {
10824 if (RT_SUCCESS(rcStrict))
10825 {
10826 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10827 || rcStrict == VINF_IOM_R3_IOPORT_READ
10828 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10829 || rcStrict == VINF_IOM_R3_MMIO_READ
10830 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10831 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10832 || rcStrict == VINF_CPUM_R3_MSR_READ
10833 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10834 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10835 || rcStrict == VINF_EM_RAW_TO_R3
10836 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10837 /* raw-mode / virt handlers only: */
10838 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10839 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10840 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10841 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10842 || rcStrict == VINF_SELM_SYNC_GDT
10843 || rcStrict == VINF_CSAM_PENDING_ACTION
10844 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10845 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10846/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10847 int32_t const rcPassUp = pIemCpu->rcPassUp;
10848 if (rcPassUp == VINF_SUCCESS)
10849 pIemCpu->cRetInfStatuses++;
10850 else if ( rcPassUp < VINF_EM_FIRST
10851 || rcPassUp > VINF_EM_LAST
10852 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10853 {
10854 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10855 pIemCpu->cRetPassUpStatus++;
10856 rcStrict = rcPassUp;
10857 }
10858 else
10859 {
10860 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10861 pIemCpu->cRetInfStatuses++;
10862 }
10863 }
10864 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10865 pIemCpu->cRetAspectNotImplemented++;
10866 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10867 pIemCpu->cRetInstrNotImplemented++;
10868#ifdef IEM_VERIFICATION_MODE_FULL
10869 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10870 rcStrict = VINF_SUCCESS;
10871#endif
10872 else
10873 pIemCpu->cRetErrStatuses++;
10874 }
10875 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10876 {
10877 pIemCpu->cRetPassUpStatus++;
10878 rcStrict = pIemCpu->rcPassUp;
10879 }
10880
10881 return rcStrict;
10882}
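
/*
 * Illustrative example (not from the original source, and assuming the usual
 * numeric ordering of these status codes): if an instruction returns
 * VINF_IOM_R3_IOPORT_WRITE while a pass-up status of VINF_EM_RAW_TO_R3 is
 * pending, the pass-up status is taken over because it is either outside the
 * VINF_EM range or numerically lower (i.e. higher priority) than the
 * instruction status; a pending VINF_SUCCESS just bumps cRetInfStatuses.
 */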
10883
10884
10885/**
10886 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10887 * IEMExecOneWithPrefetchedByPC.
10888 *
10889 * @return Strict VBox status code.
10890 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10891 * @param pIemCpu The IEM per CPU data.
10892 * @param fExecuteInhibit If set, execute the instruction following STI,
10893 * POP SS and MOV SS,GR.
10894 */
10895DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10896{
10897 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10898 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10899 if (rcStrict == VINF_SUCCESS)
10900 pIemCpu->cInstructions++;
10901 if (pIemCpu->cActiveMappings > 0)
10902 iemMemRollback(pIemCpu);
10903//#ifdef DEBUG
10904// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10905//#endif
10906
10907 /* Execute the next instruction as well if a sti, pop ss or
10908 mov ss, Gr has just completed successfully. */
10909 if ( fExecuteInhibit
10910 && rcStrict == VINF_SUCCESS
10911 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10912 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10913 {
10914 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10915 if (rcStrict == VINF_SUCCESS)
10916 {
10917# ifdef LOG_ENABLED
10918 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10919# endif
10920 IEM_OPCODE_GET_NEXT_U8(&b);
10921 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10922 if (rcStrict == VINF_SUCCESS)
10923 pIemCpu->cInstructions++;
10924 if (pIemCpu->cActiveMappings > 0)
10925 iemMemRollback(pIemCpu);
10926 }
10927 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10928 }
10929
10930 /*
10931 * Return value fiddling, statistics and sanity assertions.
10932 */
10933 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10934
10935 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10936 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10937#if defined(IEM_VERIFICATION_MODE_FULL)
10938 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10939 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10940 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10941 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10942#endif
10943 return rcStrict;
10944}
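
/*
 * Illustrative guest sequence (not from the original source) showing why
 * fExecuteInhibit matters: after
 *      mov ss, ax
 *      mov esp, ebp
 * the second instruction is executed in the same call, so no interrupt (and
 * no exit back to the caller) can land between the two stack register
 * updates, mirroring the CPU's interrupt shadow behaviour.
 */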
10945
10946
10947#ifdef IN_RC
10948/**
10949 * Re-enters raw-mode or ensures we return to ring-3.
10950 *
10951 * @returns rcStrict, maybe modified.
10952 * @param pIemCpu The IEM CPU structure.
10953 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10954 * @param pCtx The current CPU context.
10955 * @param rcStrict The status code returned by the interpreter.
10956 */
10957DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10958{
10959 if (!pIemCpu->fInPatchCode)
10960 CPUMRawEnter(pVCpu);
10961 return rcStrict;
10962}
10963#endif
10964
10965
10966/**
10967 * Execute one instruction.
10968 *
10969 * @return Strict VBox status code.
10970 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10971 */
10972VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10973{
10974 PIEMCPU pIemCpu = &pVCpu->iem.s;
10975
10976#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10977 iemExecVerificationModeSetup(pIemCpu);
10978#endif
10979#ifdef LOG_ENABLED
10980 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10981 iemLogCurInstr(pVCpu, pCtx, true);
10982#endif
10983
10984 /*
10985 * Do the decoding and emulation.
10986 */
10987 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10988 if (rcStrict == VINF_SUCCESS)
10989 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10990
10991#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10992 /*
10993 * Assert some sanity.
10994 */
10995 iemExecVerificationModeCheck(pIemCpu);
10996#endif
10997#ifdef IN_RC
10998 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10999#endif
11000 if (rcStrict != VINF_SUCCESS)
11001 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11002 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11003 return rcStrict;
11004}
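
/*
 * Illustrative sketch only (not part of the original source): a hypothetical
 * ring-3 caller loop around IEMExecOne; the helper name and the instruction
 * budget are made up.
 */
#if 0 /* never compiled, example only */
static VBOXSTRICTRC emR3SampleInterpretChunk(PVMCPU pVCpu)
{
    /* Interpret a handful of instructions, stopping at the first status that
       needs attention from the caller (deferred I/O, rescheduling, errors). */
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (unsigned cInstrs = 0; cInstrs < 16 && rcStrict == VINF_SUCCESS; cInstrs++)
        rcStrict = IEMExecOne(pVCpu);
    return rcStrict;
}
#endif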
11005
11006
11007VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11008{
11009 PIEMCPU pIemCpu = &pVCpu->iem.s;
11010 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11011 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11012
11013 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11014 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11015 if (rcStrict == VINF_SUCCESS)
11016 {
11017 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11018 if (pcbWritten)
11019 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11020 }
11021
11022#ifdef IN_RC
11023 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11024#endif
11025 return rcStrict;
11026}
11027
11028
11029VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11030 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11031{
11032 PIEMCPU pIemCpu = &pVCpu->iem.s;
11033 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11034 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11035
11036 VBOXSTRICTRC rcStrict;
11037 if ( cbOpcodeBytes
11038 && pCtx->rip == OpcodeBytesPC)
11039 {
11040 iemInitDecoder(pIemCpu, false);
11041 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11042 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11043 rcStrict = VINF_SUCCESS;
11044 }
11045 else
11046 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11047 if (rcStrict == VINF_SUCCESS)
11048 {
11049 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11050 }
11051
11052#ifdef IN_RC
11053 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11054#endif
11055 return rcStrict;
11056}
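
/*
 * Illustrative sketch only (not part of the original source): a hypothetical
 * caller that already fetched the opcode bytes (e.g. from an exit info block);
 * the helper name is made up.
 */
#if 0 /* never compiled, example only */
static VBOXSTRICTRC hmR3SampleEmulateWithBytes(PVMCPU pVCpu, PCPUMCTX pCtx,
                                               const uint8_t *pabOpcode, size_t cbOpcode)
{
    /* The supplied bytes are only used when they were fetched at the current
       RIP; otherwise IEM prefetches the opcodes itself. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip, pabOpcode, cbOpcode);
}
#endif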
11057
11058
11059VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11060{
11061 PIEMCPU pIemCpu = &pVCpu->iem.s;
11062 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11063 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11064
11065 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11066 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11067 if (rcStrict == VINF_SUCCESS)
11068 {
11069 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11070 if (pcbWritten)
11071 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11072 }
11073
11074#ifdef IN_RC
11075 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11076#endif
11077 return rcStrict;
11078}
11079
11080
11081VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11082 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11083{
11084 PIEMCPU pIemCpu = &pVCpu->iem.s;
11085 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11086 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11087
11088 VBOXSTRICTRC rcStrict;
11089 if ( cbOpcodeBytes
11090 && pCtx->rip == OpcodeBytesPC)
11091 {
11092 iemInitDecoder(pIemCpu, true);
11093 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11094 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11095 rcStrict = VINF_SUCCESS;
11096 }
11097 else
11098 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11099 if (rcStrict == VINF_SUCCESS)
11100 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11101
11102#ifdef IN_RC
11103 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11104#endif
11105 return rcStrict;
11106}
11107
11108
11109VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11110{
11111 PIEMCPU pIemCpu = &pVCpu->iem.s;
11112
11113 /*
11114 * See if there is an interrupt pending in TRPM and inject it if we can.
11115 */
11116#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11117 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11118# ifdef IEM_VERIFICATION_MODE_FULL
11119 pIemCpu->uInjectCpl = UINT8_MAX;
11120# endif
11121 if ( pCtx->eflags.Bits.u1IF
11122 && TRPMHasTrap(pVCpu)
11123 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11124 {
11125 uint8_t u8TrapNo;
11126 TRPMEVENT enmType;
11127 RTGCUINT uErrCode;
11128 RTGCPTR uCr2;
11129 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11130 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11131 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11132 TRPMResetTrap(pVCpu);
11133 }
11134#else
11135 iemExecVerificationModeSetup(pIemCpu);
11136 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11137#endif
11138
11139 /*
11140 * Log the state.
11141 */
11142#ifdef LOG_ENABLED
11143 iemLogCurInstr(pVCpu, pCtx, true);
11144#endif
11145
11146 /*
11147 * Do the decoding and emulation.
11148 */
11149 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11150 if (rcStrict == VINF_SUCCESS)
11151 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11152
11153#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11154 /*
11155 * Assert some sanity.
11156 */
11157 iemExecVerificationModeCheck(pIemCpu);
11158#endif
11159
11160 /*
11161 * Maybe re-enter raw-mode and log.
11162 */
11163#ifdef IN_RC
11164 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11165#endif
11166 if (rcStrict != VINF_SUCCESS)
11167 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11168 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11169 return rcStrict;
11170}
11171
11172
11173
11174/**
11175 * Injects a trap, fault, abort, software interrupt or external interrupt.
11176 *
11177 * The parameter list matches TRPMQueryTrapAll pretty closely.
11178 *
11179 * @returns Strict VBox status code.
11180 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11181 * @param u8TrapNo The trap number.
11182 * @param enmType What type is it (trap/fault/abort), software
11183 * interrupt or hardware interrupt.
11184 * @param uErrCode The error code if applicable.
11185 * @param uCr2 The CR2 value if applicable.
11186 * @param cbInstr The instruction length (only relevant for
11187 * software interrupts).
11188 */
11189VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11190 uint8_t cbInstr)
11191{
11192 iemInitDecoder(&pVCpu->iem.s, false);
11193#ifdef DBGFTRACE_ENABLED
11194 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11195 u8TrapNo, enmType, uErrCode, uCr2);
11196#endif
11197
11198 uint32_t fFlags;
11199 switch (enmType)
11200 {
11201 case TRPM_HARDWARE_INT:
11202 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11203 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11204 uErrCode = uCr2 = 0;
11205 break;
11206
11207 case TRPM_SOFTWARE_INT:
11208 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11209 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11210 uErrCode = uCr2 = 0;
11211 break;
11212
11213 case TRPM_TRAP:
11214 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11215 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11216 if (u8TrapNo == X86_XCPT_PF)
11217 fFlags |= IEM_XCPT_FLAGS_CR2;
11218 switch (u8TrapNo)
11219 {
11220 case X86_XCPT_DF:
11221 case X86_XCPT_TS:
11222 case X86_XCPT_NP:
11223 case X86_XCPT_SS:
11224 case X86_XCPT_PF:
11225 case X86_XCPT_AC:
11226 fFlags |= IEM_XCPT_FLAGS_ERR;
11227 break;
11228
11229 case X86_XCPT_NMI:
11230 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11231 break;
11232 }
11233 break;
11234
11235 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11236 }
11237
11238 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11239}
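
/*
 * Illustrative sketch only (not part of the original source): hypothetical
 * injection of a present+write \#PF at GCPtrFault; the helper name and the
 * error code bits are assumptions about the scenario.
 */
#if 0 /* never compiled, example only */
static VBOXSTRICTRC sampleInjectWritePageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                         X86_TRAP_PF_P | X86_TRAP_PF_RW /*uErrCode*/,
                         GCPtrFault /*uCr2*/, 0 /*cbInstr*/);
}
#endif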
11240
11241
11242/**
11243 * Injects the active TRPM event.
11244 *
11245 * @returns Strict VBox status code.
11246 * @param pVCpu The cross context virtual CPU structure.
11247 */
11248VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11249{
11250#ifndef IEM_IMPLEMENTS_TASKSWITCH
11251 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11252#else
11253 uint8_t u8TrapNo;
11254 TRPMEVENT enmType;
11255 RTGCUINT uErrCode;
11256 RTGCUINTPTR uCr2;
11257 uint8_t cbInstr;
11258 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11259 if (RT_FAILURE(rc))
11260 return rc;
11261
11262 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11263
11264 /** @todo Are there any other codes that imply the event was successfully
11265 * delivered to the guest? See @bugref{6607}. */
11266 if ( rcStrict == VINF_SUCCESS
11267 || rcStrict == VINF_IEM_RAISED_XCPT)
11268 {
11269 TRPMResetTrap(pVCpu);
11270 }
11271 return rcStrict;
11272#endif
11273}
11274
11275
11276VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11277{
11278 return VERR_NOT_IMPLEMENTED;
11279}
11280
11281
11282VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11283{
11284 return VERR_NOT_IMPLEMENTED;
11285}
11286
11287
11288#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11289/**
11290 * Executes a IRET instruction with default operand size.
11291 *
11292 * This is for PATM.
11293 *
11294 * @returns VBox status code.
11295 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11296 * @param pCtxCore The register frame.
11297 */
11298VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11299{
11300 PIEMCPU pIemCpu = &pVCpu->iem.s;
11301 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11302
11303 iemCtxCoreToCtx(pCtx, pCtxCore);
11304 iemInitDecoder(pIemCpu, false /*fBypassHandlers*/);
11305 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11306 if (rcStrict == VINF_SUCCESS)
11307 iemCtxToCtxCore(pCtxCore, pCtx);
11308 else
11309 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11310 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11311 return rcStrict;
11312}
11313#endif
11314
11315
11316/**
11317 * Macro used by the IEMExec* methods to check the given instruction length.
11318 *
11319 * Will return on failure!
11320 *
11321 * @param a_cbInstr The given instruction length.
11322 * @param a_cbMin The minimum length.
11323 */
11324#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11325 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11326 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
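
/*
 * Illustrative note (not from the original source): the single unsigned
 * compare above checks a_cbMin <= a_cbInstr <= 15 in one go.  E.g. with
 * a_cbMin=2: a_cbInstr=1 gives (unsigned)(1 - 2) = 0xffffffff, which is larger
 * than 15 - 2 = 13 and fails, while a_cbInstr=15 gives 13 <= 13 and passes.
 */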
11327
11328
11329/**
11330 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11331 *
11332 * This API ASSUMES that the caller has already verified that the guest code is
11333 * allowed to access the I/O port. (The I/O port is in the DX register in the
11334 * guest state.)
11335 *
11336 * @returns Strict VBox status code.
11337 * @param pVCpu The cross context virtual CPU structure.
11338 * @param cbValue The size of the I/O port access (1, 2, or 4).
11339 * @param enmAddrMode The addressing mode.
11340 * @param fRepPrefix Indicates whether a repeat prefix is used
11341 * (doesn't matter which for this instruction).
11342 * @param cbInstr The instruction length in bytes.
11343 * @param iEffSeg The effective segment register (X86_SREG_XXX).
11344 */
11345VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11346 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11347{
11348 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11349 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11350
11351 /*
11352 * State init.
11353 */
11354 PIEMCPU pIemCpu = &pVCpu->iem.s;
11355 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11356
11357 /*
11358 * Switch orgy for getting to the right handler.
11359 */
11360 VBOXSTRICTRC rcStrict;
11361 if (fRepPrefix)
11362 {
11363 switch (enmAddrMode)
11364 {
11365 case IEMMODE_16BIT:
11366 switch (cbValue)
11367 {
11368 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11369 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11370 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11371 default:
11372 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11373 }
11374 break;
11375
11376 case IEMMODE_32BIT:
11377 switch (cbValue)
11378 {
11379 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11380 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11381 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11382 default:
11383 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11384 }
11385 break;
11386
11387 case IEMMODE_64BIT:
11388 switch (cbValue)
11389 {
11390 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11391 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11392 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11393 default:
11394 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11395 }
11396 break;
11397
11398 default:
11399 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11400 }
11401 }
11402 else
11403 {
11404 switch (enmAddrMode)
11405 {
11406 case IEMMODE_16BIT:
11407 switch (cbValue)
11408 {
11409 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11410 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11411 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11412 default:
11413 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11414 }
11415 break;
11416
11417 case IEMMODE_32BIT:
11418 switch (cbValue)
11419 {
11420 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11421 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11422 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11423 default:
11424 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11425 }
11426 break;
11427
11428 case IEMMODE_64BIT:
11429 switch (cbValue)
11430 {
11431 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11432 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11433 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11434 default:
11435 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11436 }
11437 break;
11438
11439 default:
11440 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11441 }
11442 }
11443
11444 iemUninitExec(pIemCpu);
11445 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11446}
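
/*
 * Illustrative sketch only (not part of the original source): a hypothetical
 * handler for a "rep outsb" exit in a flat 32-bit guest; the helper name,
 * cbInstr=2 (0xf3 0x6e) and the DS segment are assumptions.
 */
#if 0 /* never compiled, example only */
static VBOXSTRICTRC hmR0SampleHandleRepOutsb(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
                                true /*fRepPrefix*/, 2 /*cbInstr*/, X86_SREG_DS);
}
#endif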
11447
11448
11449/**
11450 * Interface for HM and EM for executing string I/O IN (read) instructions.
11451 *
11452 * This API ASSUMES that the caller has already verified that the guest code is
11453 * allowed to access the I/O port. (The I/O port is in the DX register in the
11454 * guest state.)
11455 *
11456 * @returns Strict VBox status code.
11457 * @param pVCpu The cross context virtual CPU structure.
11458 * @param cbValue The size of the I/O port access (1, 2, or 4).
11459 * @param enmAddrMode The addressing mode.
11460 * @param fRepPrefix Indicates whether a repeat prefix is used
11461 * (doesn't matter which for this instruction).
11462 * @param cbInstr The instruction length in bytes.
11463 */
11464VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11465 bool fRepPrefix, uint8_t cbInstr)
11466{
11467 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11468
11469 /*
11470 * State init.
11471 */
11472 PIEMCPU pIemCpu = &pVCpu->iem.s;
11473 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11474
11475 /*
11476 * Switch orgy for getting to the right handler.
11477 */
11478 VBOXSTRICTRC rcStrict;
11479 if (fRepPrefix)
11480 {
11481 switch (enmAddrMode)
11482 {
11483 case IEMMODE_16BIT:
11484 switch (cbValue)
11485 {
11486 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11487 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11488 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11489 default:
11490 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11491 }
11492 break;
11493
11494 case IEMMODE_32BIT:
11495 switch (cbValue)
11496 {
11497 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11498 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11499 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11500 default:
11501 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11502 }
11503 break;
11504
11505 case IEMMODE_64BIT:
11506 switch (cbValue)
11507 {
11508 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11509 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11510 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11511 default:
11512 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11513 }
11514 break;
11515
11516 default:
11517 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11518 }
11519 }
11520 else
11521 {
11522 switch (enmAddrMode)
11523 {
11524 case IEMMODE_16BIT:
11525 switch (cbValue)
11526 {
11527 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11528 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11529 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11530 default:
11531 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11532 }
11533 break;
11534
11535 case IEMMODE_32BIT:
11536 switch (cbValue)
11537 {
11538 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11539 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11540 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11541 default:
11542 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11543 }
11544 break;
11545
11546 case IEMMODE_64BIT:
11547 switch (cbValue)
11548 {
11549 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11550 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11551 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11552 default:
11553 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11554 }
11555 break;
11556
11557 default:
11558 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11559 }
11560 }
11561
11562 iemUninitExec(pIemCpu);
11563 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11564}
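
/*
 * Illustrative sketch only (not part of the original source): the matching
 * hypothetical handler for a "rep insb" exit (0xf3 0x6c, so cbInstr=2 is an
 * assumption); INS always stores to ES:xDI, hence no segment parameter.
 */
#if 0 /* never compiled, example only */
static VBOXSTRICTRC hmR0SampleHandleRepInsb(PVMCPU pVCpu)
{
    return IEMExecStringIoRead(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/, 2 /*cbInstr*/);
}
#endif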
11565
11566
11567
11568/**
11569 * Interface for HM and EM to write to a CRx register.
11570 *
11571 * @returns Strict VBox status code.
11572 * @param pVCpu The cross context virtual CPU structure.
11573 * @param cbInstr The instruction length in bytes.
11574 * @param iCrReg The control register number (destination).
11575 * @param iGReg The general purpose register number (source).
11576 *
11577 * @remarks In ring-0 not all of the state needs to be synced in.
11578 */
11579VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11580{
11581 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11582 Assert(iCrReg < 16);
11583 Assert(iGReg < 16);
11584
11585 PIEMCPU pIemCpu = &pVCpu->iem.s;
11586 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11587 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11588 iemUninitExec(pIemCpu);
11589 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11590}
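
/*
 * Illustrative sketch only (not part of the original source): a hypothetical
 * handler for a "mov cr3, eax" exit (0x0f 0x22 0xd8, hence cbInstr=3); the
 * helper name and the register choice are made up.
 */
#if 0 /* never compiled, example only */
static VBOXSTRICTRC hmR0SampleHandleMovToCr3(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg: CR3*/, X86_GREG_xAX /*iGReg*/);
}
#endif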
11591
11592
11593/**
11594 * Interface for HM and EM to read from a CRx register.
11595 *
11596 * @returns Strict VBox status code.
11597 * @param pVCpu The cross context virtual CPU structure.
11598 * @param cbInstr The instruction length in bytes.
11599 * @param iGReg The general purpose register number (destination).
11600 * @param iCrReg The control register number (source).
11601 *
11602 * @remarks In ring-0 not all of the state needs to be synced in.
11603 */
11604VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11605{
11606 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11607 Assert(iCrReg < 16);
11608 Assert(iGReg < 16);
11609
11610 PIEMCPU pIemCpu = &pVCpu->iem.s;
11611 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11612 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11613 iemUninitExec(pIemCpu);
11614 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11615}
11616
11617
11618/**
11619 * Interface for HM and EM to clear the CR0[TS] bit.
11620 *
11621 * @returns Strict VBox status code.
11622 * @param pVCpu The cross context virtual CPU structure.
11623 * @param cbInstr The instruction length in bytes.
11624 *
11625 * @remarks In ring-0 not all of the state needs to be synced in.
11626 */
11627VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11628{
11629 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11630
11631 PIEMCPU pIemCpu = &pVCpu->iem.s;
11632 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11633 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11634 iemUninitExec(pIemCpu);
11635 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11636}
11637
11638
11639/**
11640 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11641 *
11642 * @returns Strict VBox status code.
11643 * @param pVCpu The cross context virtual CPU structure.
11644 * @param cbInstr The instruction length in bytes.
11645 * @param uValue The value to load into CR0.
11646 *
11647 * @remarks In ring-0 not all of the state needs to be synced in.
11648 */
11649VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11650{
11651 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11652
11653 PIEMCPU pIemCpu = &pVCpu->iem.s;
11654 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11655 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11656 iemUninitExec(pIemCpu);
11657 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11658}
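
/*
 * Illustrative sketch only (not part of the original source): a hypothetical
 * handler for "lmsw ax" (0x0f 0x01 0xf0, so cbInstr=3); the new machine
 * status word would normally come from the exit information.
 */
#if 0 /* never compiled, example only */
static VBOXSTRICTRC hmR0SampleHandleLmsw(PVMCPU pVCpu, uint16_t uNewMsw)
{
    return IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/, uNewMsw);
}
#endif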
11659
11660
11661/**
11662 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11663 *
11664 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11665 *
11666 * @returns Strict VBox status code.
11667 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11668 * @param cbInstr The instruction length in bytes.
11669 * @remarks In ring-0 not all of the state needs to be synced in.
11670 * @thread EMT(pVCpu)
11671 */
11672VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11673{
11674 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11675
11676 PIEMCPU pIemCpu = &pVCpu->iem.s;
11677 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11678 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11679 iemUninitExec(pIemCpu);
11680 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11681}
11682
11683#ifdef IN_RING3
11684
11685/**
11686 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11687 *
11688 * @returns Merge between @a rcStrict and what the commit operation returned.
11689 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11690 * @param rcStrict The status code returned by ring-0 or raw-mode.
11691 */
11692VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11693{
11694 PIEMCPU pIemCpu = &pVCpu->iem.s;
11695
11696 /*
11697 * Retrieve and reset the pending commit.
11698 */
11699 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11700 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11701 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11702
11703 /*
11704 * Must reset pass-up status code.
11705 */
11706 pIemCpu->rcPassUp = VINF_SUCCESS;
11707
11708 /*
11709 * Call the function. Currently using switch here instead of function
11710 * pointer table as a switch won't get skewed.
11711 */
11712 VBOXSTRICTRC rcStrictCommit;
11713 switch (enmFn)
11714 {
11715 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11716 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11717 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11718 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11719 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11720 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11721 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11722 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11723 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11724 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11725 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11726 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11727 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11728 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11729 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11730 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11731 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11732 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11733 default:
11734 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11735 }
11736
11737 /*
11738 * Merge status code (if any) with the incoming one.
11739 */
11740 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11741 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11742 return rcStrict;
11743 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11744 return rcStrictCommit;
11745
11746 /* Complicated. */
11747 if (RT_FAILURE(rcStrict))
11748 return rcStrict;
11749 if (RT_FAILURE(rcStrictCommit))
11750 return rcStrictCommit;
11751 if ( rcStrict >= VINF_EM_FIRST
11752 && rcStrict <= VINF_EM_LAST)
11753 {
11754 if ( rcStrictCommit >= VINF_EM_FIRST
11755 && rcStrictCommit <= VINF_EM_LAST)
11756 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11757
11758 /* This really shouldn't happen. Check PGM + handler code! */
11759 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11760 }
11761 /* This shouldn't really happen either, see IOM_SUCCESS. */
11762 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11763}
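
/*
 * Illustrative example (not from the original source): if the instruction
 * originally returned VINF_SUCCESS and the deferred commit now returns
 * VINF_IOM_R3_IOPORT_WRITE, the commit status is passed up; if both are EM
 * scheduling statuses, the numerically lower (higher priority) one wins.
 */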
11764
11765 #endif /* IN_RING3 */
11766