VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 60779

Last change on this file since 60779 was 60779, checked in by vboxsync, 9 years ago

IEM: Fixed cross page CR2 value.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 449.5 KB
1/* $Id: IEMAll.cpp 60779 2016-05-02 08:50:50Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 *
72 */
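/* Illustrative sketch (not compiled): typical use of the log levels listed above
 * from IEM code. The guest-state values and the mnemonic are made up for the
 * example; only the macros and format specifiers are real.
 *
 *     LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));      - enter/exit info (Flow)
 *     Log4(("decode - %04x:%08RX64: example mnemonic\n", pCtx->cs.Sel, pCtx->rip)); - mnemonic w/ EIP (Level 4)
 *     Log(("iemRaiseXcptOrInt: u8Vector=%#x\n", u8Vector));                         - major event (Level 1)
 */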
73
74/** @def IEM_VERIFICATION_MODE_MINIMAL
75 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
76 * context. */
77#if defined(DOXYGEN_RUNNING)
78# define IEM_VERIFICATION_MODE_MINIMAL
79#endif
80//#define IEM_LOG_MEMORY_WRITES
81#define IEM_IMPLEMENTS_TASKSWITCH
82
83
84/*********************************************************************************************************************************
85* Header Files *
86*********************************************************************************************************************************/
87#define LOG_GROUP LOG_GROUP_IEM
88#include <VBox/vmm/iem.h>
89#include <VBox/vmm/cpum.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <internal/pgm.h>
93#include <VBox/vmm/iom.h>
94#include <VBox/vmm/em.h>
95#include <VBox/vmm/hm.h>
96#include <VBox/vmm/tm.h>
97#include <VBox/vmm/dbgf.h>
98#include <VBox/vmm/dbgftrace.h>
99#ifdef VBOX_WITH_RAW_MODE_NOT_R0
100# include <VBox/vmm/patm.h>
101# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
102# include <VBox/vmm/csam.h>
103# endif
104#endif
105#include "IEMInternal.h"
106#ifdef IEM_VERIFICATION_MODE_FULL
107# include <VBox/vmm/rem.h>
108# include <VBox/vmm/mm.h>
109#endif
110#include <VBox/vmm/vm.h>
111#include <VBox/log.h>
112#include <VBox/err.h>
113#include <VBox/param.h>
114#include <VBox/dis.h>
115#include <VBox/disopcode.h>
116#include <iprt/assert.h>
117#include <iprt/string.h>
118#include <iprt/x86.h>
119
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/** @typedef PFNIEMOP
126 * Pointer to an opcode decoder function.
127 */
128
129/** @def FNIEMOP_DEF
130 * Define an opcode decoder function.
131 *
132 * We're using macros for this so that adding and removing parameters as well as
133 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
134 *
135 * @param a_Name The function name.
136 */
137
138
139#if defined(__GNUC__) && defined(RT_ARCH_X86)
140typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
141# define FNIEMOP_DEF(a_Name) \
142 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
143# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
145# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
147
148#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
149typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
150# define FNIEMOP_DEF(a_Name) \
151 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
152# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
156
157#elif defined(__GNUC__)
158typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
159# define FNIEMOP_DEF(a_Name) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
161# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
163# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
165
166#else
167typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#endif
176
177
178/**
179 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
180 */
181typedef union IEMSELDESC
182{
183 /** The legacy view. */
184 X86DESC Legacy;
185 /** The long mode view. */
186 X86DESC64 Long;
187} IEMSELDESC;
188/** Pointer to a selector descriptor table entry. */
189typedef IEMSELDESC *PIEMSELDESC;
190
191
192/*********************************************************************************************************************************
193* Defined Constants And Macros *
194*********************************************************************************************************************************/
195/** Temporary hack to disable the double execution. Will be removed in favor
196 * of a dedicated execution mode in EM. */
197//#define IEM_VERIFICATION_MODE_NO_REM
198
199/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
200 * due to GCC lacking knowledge about the value range of a switch. */
201#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
202
203/**
204 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
205 * occasion.
206 */
207#ifdef LOG_ENABLED
208# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
209 do { \
210 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
212 } while (0)
213#else
214# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
215 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
216#endif
217
218/**
219 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
220 * occasion using the supplied logger statement.
221 *
222 * @param a_LoggerArgs What to log on failure.
223 */
224#ifdef LOG_ENABLED
225# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
226 do { \
227 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
228 /*LogFunc(a_LoggerArgs);*/ \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
230 } while (0)
231#else
232# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
233 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
234#endif
235
236/**
237 * Call an opcode decoder function.
238 *
239 * We're using macros for this so that adding and removing parameters can be
240 * done as we please. See FNIEMOP_DEF.
241 */
242#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
243
244/**
245 * Call a common opcode decoder function taking one extra argument.
246 *
247 * We're using macros for this so that adding and removing parameters can be
248 * done as we please. See FNIEMOP_DEF_1.
249 */
250#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
251
252/**
253 * Call a common opcode decoder function taking two extra arguments.
254 *
255 * We're using macros for this so that adding and removing parameters can be
256 * done as we please. See FNIEMOP_DEF_2.
257 */
258#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
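/* Illustrative sketch (not compiled): how FNIEMOP_DEF and FNIEMOP_CALL pair up
 * in the opcode decoders. Both function names below are hypothetical and only
 * serve to show the calling pattern.
 *
 *     FNIEMOP_DEF_1(iemOpCommonExampleWorker, uint8_t, bRm)
 *     {
 *         NOREF(bRm);
 *         return VINF_SUCCESS;
 *     }
 *
 *     FNIEMOP_DEF(iemOp_ExampleOpcode)
 *     {
 *         uint8_t bRm;
 *         IEM_OPCODE_GET_NEXT_U8(&bRm);
 *         return FNIEMOP_CALL_1(iemOpCommonExampleWorker, bRm);
 *     }
 */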
259
260/**
261 * Check if we're currently executing in real or virtual 8086 mode.
262 *
263 * @returns @c true if it is, @c false if not.
264 * @param a_pIemCpu The IEM state of the current CPU.
265 */
266#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
267
268/**
269 * Check if we're currently executing in virtual 8086 mode.
270 *
271 * @returns @c true if it is, @c false if not.
272 * @param a_pIemCpu The IEM state of the current CPU.
273 */
274#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
275
276/**
277 * Check if we're currently executing in long mode.
278 *
279 * @returns @c true if it is, @c false if not.
280 * @param a_pIemCpu The IEM state of the current CPU.
281 */
282#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
283
284/**
285 * Check if we're currently executing in real mode.
286 *
287 * @returns @c true if it is, @c false if not.
288 * @param a_pIemCpu The IEM state of the current CPU.
289 */
290#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
291
292/**
293 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
294 * @returns PCCPUMFEATURES
295 * @param a_pIemCpu The IEM state of the current CPU.
296 */
297#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
298
299/**
300 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
301 * @returns PCCPUMFEATURES
302 * @param a_pIemCpu The IEM state of the current CPU.
303 */
304#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
305
306/**
307 * Evaluates to true if we're presenting an Intel CPU to the guest.
308 */
309#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
310
311/**
312 * Evaluates to true if we're presenting an AMD CPU to the guest.
313 */
314#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
315
316/**
317 * Check if the address is canonical.
318 */
319#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
320
321
322/*********************************************************************************************************************************
323* Global Variables *
324*********************************************************************************************************************************/
325extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
326
327
328/** Function table for the ADD instruction. */
329IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
330{
331 iemAImpl_add_u8, iemAImpl_add_u8_locked,
332 iemAImpl_add_u16, iemAImpl_add_u16_locked,
333 iemAImpl_add_u32, iemAImpl_add_u32_locked,
334 iemAImpl_add_u64, iemAImpl_add_u64_locked
335};
336
337/** Function table for the ADC instruction. */
338IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
339{
340 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
341 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
342 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
343 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
344};
345
346/** Function table for the SUB instruction. */
347IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
348{
349 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
350 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
351 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
352 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
353};
354
355/** Function table for the SBB instruction. */
356IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
357{
358 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
359 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
360 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
361 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
362};
363
364/** Function table for the OR instruction. */
365IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
366{
367 iemAImpl_or_u8, iemAImpl_or_u8_locked,
368 iemAImpl_or_u16, iemAImpl_or_u16_locked,
369 iemAImpl_or_u32, iemAImpl_or_u32_locked,
370 iemAImpl_or_u64, iemAImpl_or_u64_locked
371};
372
373/** Function table for the XOR instruction. */
374IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
375{
376 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
377 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
378 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
379 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
380};
381
382/** Function table for the AND instruction. */
383IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
384{
385 iemAImpl_and_u8, iemAImpl_and_u8_locked,
386 iemAImpl_and_u16, iemAImpl_and_u16_locked,
387 iemAImpl_and_u32, iemAImpl_and_u32_locked,
388 iemAImpl_and_u64, iemAImpl_and_u64_locked
389};
390
391/** Function table for the CMP instruction.
392 * @remarks Making operand order ASSUMPTIONS.
393 */
394IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
395{
396 iemAImpl_cmp_u8, NULL,
397 iemAImpl_cmp_u16, NULL,
398 iemAImpl_cmp_u32, NULL,
399 iemAImpl_cmp_u64, NULL
400};
401
402/** Function table for the TEST instruction.
403 * @remarks Making operand order ASSUMPTIONS.
404 */
405IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
406{
407 iemAImpl_test_u8, NULL,
408 iemAImpl_test_u16, NULL,
409 iemAImpl_test_u32, NULL,
410 iemAImpl_test_u64, NULL
411};
412
413/** Function table for the BT instruction. */
414IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
415{
416 NULL, NULL,
417 iemAImpl_bt_u16, NULL,
418 iemAImpl_bt_u32, NULL,
419 iemAImpl_bt_u64, NULL
420};
421
422/** Function table for the BTC instruction. */
423IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
424{
425 NULL, NULL,
426 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
427 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
428 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
429};
430
431/** Function table for the BTR instruction. */
432IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
433{
434 NULL, NULL,
435 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
436 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
437 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
438};
439
440/** Function table for the BTS instruction. */
441IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
442{
443 NULL, NULL,
444 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
445 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
446 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
447};
448
449/** Function table for the BSF instruction. */
450IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
451{
452 NULL, NULL,
453 iemAImpl_bsf_u16, NULL,
454 iemAImpl_bsf_u32, NULL,
455 iemAImpl_bsf_u64, NULL
456};
457
458/** Function table for the BSR instruction. */
459IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
460{
461 NULL, NULL,
462 iemAImpl_bsr_u16, NULL,
463 iemAImpl_bsr_u32, NULL,
464 iemAImpl_bsr_u64, NULL
465};
466
467/** Function table for the IMUL instruction. */
468IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
469{
470 NULL, NULL,
471 iemAImpl_imul_two_u16, NULL,
472 iemAImpl_imul_two_u32, NULL,
473 iemAImpl_imul_two_u64, NULL
474};
475
476/** Group 1 /r lookup table. */
477IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
478{
479 &g_iemAImpl_add,
480 &g_iemAImpl_or,
481 &g_iemAImpl_adc,
482 &g_iemAImpl_sbb,
483 &g_iemAImpl_and,
484 &g_iemAImpl_sub,
485 &g_iemAImpl_xor,
486 &g_iemAImpl_cmp
487};
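/* Illustrative sketch (not compiled): the table above is indexed by the ModR/M
 * reg field, matching the /r values of the group 1 encodings (0=ADD ... 7=CMP).
 * A hypothetical group 1 decoder would pick its worker roughly like this:
 *
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_U8(&bRm);
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 *      - reg=0 yields &g_iemAImpl_add, reg=1 &g_iemAImpl_or, ..., reg=7 &g_iemAImpl_cmp.
 */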
488
489/** Function table for the INC instruction. */
490IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
491{
492 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
493 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
494 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
495 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
496};
497
498/** Function table for the DEC instruction. */
499IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
500{
501 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
502 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
503 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
504 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
505};
506
507/** Function table for the NEG instruction. */
508IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
509{
510 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
511 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
512 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
513 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
514};
515
516/** Function table for the NOT instruction. */
517IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
518{
519 iemAImpl_not_u8, iemAImpl_not_u8_locked,
520 iemAImpl_not_u16, iemAImpl_not_u16_locked,
521 iemAImpl_not_u32, iemAImpl_not_u32_locked,
522 iemAImpl_not_u64, iemAImpl_not_u64_locked
523};
524
525
526/** Function table for the ROL instruction. */
527IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
528{
529 iemAImpl_rol_u8,
530 iemAImpl_rol_u16,
531 iemAImpl_rol_u32,
532 iemAImpl_rol_u64
533};
534
535/** Function table for the ROR instruction. */
536IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
537{
538 iemAImpl_ror_u8,
539 iemAImpl_ror_u16,
540 iemAImpl_ror_u32,
541 iemAImpl_ror_u64
542};
543
544/** Function table for the RCL instruction. */
545IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
546{
547 iemAImpl_rcl_u8,
548 iemAImpl_rcl_u16,
549 iemAImpl_rcl_u32,
550 iemAImpl_rcl_u64
551};
552
553/** Function table for the RCR instruction. */
554IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
555{
556 iemAImpl_rcr_u8,
557 iemAImpl_rcr_u16,
558 iemAImpl_rcr_u32,
559 iemAImpl_rcr_u64
560};
561
562/** Function table for the SHL instruction. */
563IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
564{
565 iemAImpl_shl_u8,
566 iemAImpl_shl_u16,
567 iemAImpl_shl_u32,
568 iemAImpl_shl_u64
569};
570
571/** Function table for the SHR instruction. */
572IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
573{
574 iemAImpl_shr_u8,
575 iemAImpl_shr_u16,
576 iemAImpl_shr_u32,
577 iemAImpl_shr_u64
578};
579
580/** Function table for the SAR instruction. */
581IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
582{
583 iemAImpl_sar_u8,
584 iemAImpl_sar_u16,
585 iemAImpl_sar_u32,
586 iemAImpl_sar_u64
587};
588
589
590/** Function table for the MUL instruction. */
591IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
592{
593 iemAImpl_mul_u8,
594 iemAImpl_mul_u16,
595 iemAImpl_mul_u32,
596 iemAImpl_mul_u64
597};
598
599/** Function table for the IMUL instruction working implicitly on rAX. */
600IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
601{
602 iemAImpl_imul_u8,
603 iemAImpl_imul_u16,
604 iemAImpl_imul_u32,
605 iemAImpl_imul_u64
606};
607
608/** Function table for the DIV instruction. */
609IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
610{
611 iemAImpl_div_u8,
612 iemAImpl_div_u16,
613 iemAImpl_div_u32,
614 iemAImpl_div_u64
615};
616
617/** Function table for the IDIV instruction. */
618IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
619{
620 iemAImpl_idiv_u8,
621 iemAImpl_idiv_u16,
622 iemAImpl_idiv_u32,
623 iemAImpl_idiv_u64
624};
625
626/** Function table for the SHLD instruction */
627IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
628{
629 iemAImpl_shld_u16,
630 iemAImpl_shld_u32,
631 iemAImpl_shld_u64,
632};
633
634/** Function table for the SHRD instruction */
635IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
636{
637 iemAImpl_shrd_u16,
638 iemAImpl_shrd_u32,
639 iemAImpl_shrd_u64,
640};
641
642
643/** Function table for the PUNPCKLBW instruction */
644IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
645/** Function table for the PUNPCKLWD instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
647/** Function table for the PUNPCKLDQ instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
649/** Function table for the PUNPCKLQDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
651
652/** Function table for the PUNPCKHBW instruction */
653IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
654/** Function table for the PUNPCKHWD instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
656/** Function table for the PUNPCKHDQ instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
658/** Function table for the PUNPCKHQDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
660
661/** Function table for the PXOR instruction */
662IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
663/** Function table for the PCMPEQB instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
665/** Function table for the PCMPEQW instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
667/** Function table for the PCMPEQD instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
669
670
671#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
672/** What IEM just wrote. */
673uint8_t g_abIemWrote[256];
674/** How much IEM just wrote. */
675size_t g_cbIemWrote;
676#endif
677
678
679/*********************************************************************************************************************************
680* Internal Functions *
681*********************************************************************************************************************************/
682IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
683IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
687IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
689IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
692IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
694IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
695IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
698IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
699IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
700IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
701IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
709IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
710IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
713IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
714IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
715
716#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
717IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
718#endif
719IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
720IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
721
722
723
724/**
725 * Sets the pass up status.
726 *
727 * @returns VINF_SUCCESS.
728 * @param pIemCpu The per CPU IEM state of the calling thread.
729 * @param rcPassUp The pass up status. Must be informational.
730 * VINF_SUCCESS is not allowed.
731 */
732IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
733{
734 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
735
736 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
737 if (rcOldPassUp == VINF_SUCCESS)
738 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
739 /* If both are EM scheduling codes, use EM priority rules. */
740 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
741 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
742 {
743 if (rcPassUp < rcOldPassUp)
744 {
745 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
747 }
748 else
749 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
750 }
751 /* Override EM scheduling with specific status code. */
752 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
753 {
754 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
755 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
756 }
757 /* Don't override specific status code, first come first served. */
758 else
759 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
760 return VINF_SUCCESS;
761}
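/* Worked example (illustrative, not compiled): both statuses below are EM
 * scheduling codes, so the second call only replaces the stored one if its value
 * is numerically lower (i.e. higher priority). A specific informational status
 * stored earlier would instead be kept on a first come, first served basis.
 *
 *     iemSetPassUpStatus(pIemCpu, VINF_EM_RESCHEDULE);
 *     iemSetPassUpStatus(pIemCpu, VINF_EM_RAW_TO_R3);
 */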
762
763
764/**
765 * Calculates the CPU mode.
766 *
767 * This is mainly for updating IEMCPU::enmCpuMode.
768 *
769 * @returns CPU mode.
770 * @param pCtx The register context for the CPU.
771 */
772DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
773{
774 if (CPUMIsGuestIn64BitCodeEx(pCtx))
775 return IEMMODE_64BIT;
776 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
777 return IEMMODE_32BIT;
778 return IEMMODE_16BIT;
779}
780
781
782/**
783 * Initializes the execution state.
784 *
785 * @param pIemCpu The per CPU IEM state.
786 * @param fBypassHandlers Whether to bypass access handlers.
787 *
788 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
789 * side-effects in strict builds.
790 */
791DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
792{
793 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
794 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
795
796 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
797 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
798
799#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
800 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
801 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
808#endif
809
810#ifdef VBOX_WITH_RAW_MODE_NOT_R0
811 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
812#endif
813 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
814 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
815#ifdef VBOX_STRICT
816 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
817 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
818 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
819 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
820 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
821 pIemCpu->uRexReg = 127;
822 pIemCpu->uRexB = 127;
823 pIemCpu->uRexIndex = 127;
824 pIemCpu->iEffSeg = 127;
825 pIemCpu->offOpcode = 127;
826 pIemCpu->cbOpcode = 127;
827#endif
828
829 pIemCpu->cActiveMappings = 0;
830 pIemCpu->iNextMapping = 0;
831 pIemCpu->rcPassUp = VINF_SUCCESS;
832 pIemCpu->fBypassHandlers = fBypassHandlers;
833#ifdef VBOX_WITH_RAW_MODE_NOT_R0
834 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
835 && pCtx->cs.u64Base == 0
836 && pCtx->cs.u32Limit == UINT32_MAX
837 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
838 if (!pIemCpu->fInPatchCode)
839 CPUMRawLeave(pVCpu, VINF_SUCCESS);
840#endif
841
842#ifdef IEM_VERIFICATION_MODE_FULL
843 pIemCpu->fNoRemSavedByExec = pIemCpu->fNoRem;
844 pIemCpu->fNoRem = true;
845#endif
846}
847
848
849/**
850 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
851 *
852 * @param pIemCpu The per CPU IEM state.
853 */
854DECLINLINE(void) iemUninitExec(PIEMCPU pIemCpu)
855{
856#ifdef IEM_VERIFICATION_MODE_FULL
857 pIemCpu->fNoRem = pIemCpu->fNoRemSavedByExec;
858#endif
859#ifdef VBOX_STRICT
860 pIemCpu->cbOpcode = 0;
861#else
862 NOREF(pIemCpu);
863#endif
864}
865
866
867/**
868 * Initializes the decoder state.
869 *
870 * @param pIemCpu The per CPU IEM state.
871 * @param fBypassHandlers Whether to bypass access handlers.
872 */
873DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
874{
875 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
876 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
877
878 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
879 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
880
881#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
882 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
883 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
885 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
886 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
887 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
888 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
889 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
890#endif
891
892#ifdef VBOX_WITH_RAW_MODE_NOT_R0
893 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
894#endif
895 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
896#ifdef IEM_VERIFICATION_MODE_FULL
897 if (pIemCpu->uInjectCpl != UINT8_MAX)
898 pIemCpu->uCpl = pIemCpu->uInjectCpl;
899#endif
900 IEMMODE enmMode = iemCalcCpuMode(pCtx);
901 pIemCpu->enmCpuMode = enmMode;
902 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
903 pIemCpu->enmEffAddrMode = enmMode;
904 if (enmMode != IEMMODE_64BIT)
905 {
906 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
907 pIemCpu->enmEffOpSize = enmMode;
908 }
909 else
910 {
911 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
912 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
913 }
914 pIemCpu->fPrefixes = 0;
915 pIemCpu->uRexReg = 0;
916 pIemCpu->uRexB = 0;
917 pIemCpu->uRexIndex = 0;
918 pIemCpu->iEffSeg = X86_SREG_DS;
919 pIemCpu->offOpcode = 0;
920 pIemCpu->cbOpcode = 0;
921 pIemCpu->cActiveMappings = 0;
922 pIemCpu->iNextMapping = 0;
923 pIemCpu->rcPassUp = VINF_SUCCESS;
924 pIemCpu->fBypassHandlers = fBypassHandlers;
925#ifdef VBOX_WITH_RAW_MODE_NOT_R0
926 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
927 && pCtx->cs.u64Base == 0
928 && pCtx->cs.u32Limit == UINT32_MAX
929 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
930 if (!pIemCpu->fInPatchCode)
931 CPUMRawLeave(pVCpu, VINF_SUCCESS);
932#endif
933
934#ifdef DBGFTRACE_ENABLED
935 switch (enmMode)
936 {
937 case IEMMODE_64BIT:
938 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
939 break;
940 case IEMMODE_32BIT:
941 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
942 break;
943 case IEMMODE_16BIT:
944 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
945 break;
946 }
947#endif
948}
949
950
951/**
952 * Prefetches the opcodes the first time, i.e. when starting to execute.
953 *
954 * @returns Strict VBox status code.
955 * @param pIemCpu The IEM state.
956 * @param fBypassHandlers Whether to bypass access handlers.
957 */
958IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
959{
960#ifdef IEM_VERIFICATION_MODE_FULL
961 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
962#endif
963 iemInitDecoder(pIemCpu, fBypassHandlers);
964
965 /*
966 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
967 *
968 * First translate CS:rIP to a physical address.
969 */
970 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
971 uint32_t cbToTryRead;
972 RTGCPTR GCPtrPC;
973 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
974 {
975 cbToTryRead = PAGE_SIZE;
976 GCPtrPC = pCtx->rip;
977 if (!IEM_IS_CANONICAL(GCPtrPC))
978 return iemRaiseGeneralProtectionFault0(pIemCpu);
979 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
980 }
981 else
982 {
983 uint32_t GCPtrPC32 = pCtx->eip;
984 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
985 if (GCPtrPC32 > pCtx->cs.u32Limit)
986 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
987 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
988 if (!cbToTryRead) /* overflowed */
989 {
990 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
991 cbToTryRead = UINT32_MAX;
992 }
993 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
994 Assert(GCPtrPC <= UINT32_MAX);
995 }
996
997#ifdef VBOX_WITH_RAW_MODE_NOT_R0
998 /* Allow interpretation of patch manager code blocks since they can for
999 instance throw #PFs for perfectly good reasons. */
1000 if (pIemCpu->fInPatchCode)
1001 {
1002 size_t cbRead = 0;
1003 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
1004 AssertRCReturn(rc, rc);
1005 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1006 return VINF_SUCCESS;
1007 }
1008#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1009
1010 RTGCPHYS GCPhys;
1011 uint64_t fFlags;
1012 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
1013 if (RT_FAILURE(rc))
1014 {
1015 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1016 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1017 }
1018 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1019 {
1020 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1021 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1022 }
1023 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1024 {
1025 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1026 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1027 }
1028 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1029 /** @todo Check reserved bits and such stuff. PGM is better at doing
1030 * that, so do it when implementing the guest virtual address
1031 * TLB... */
1032
1033#ifdef IEM_VERIFICATION_MODE_FULL
1034 /*
1035 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1036 * instruction.
1037 */
1038 /** @todo optimize this differently by not using PGMPhysRead. */
1039 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1040 pIemCpu->GCPhysOpcodes = GCPhys;
1041 if ( offPrevOpcodes < cbOldOpcodes
1042 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1043 {
1044 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1045 Assert(cbNew <= RT_ELEMENTS(pIemCpu->abOpcode));
1046 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1047 pIemCpu->cbOpcode = cbNew;
1048 return VINF_SUCCESS;
1049 }
1050#endif
1051
1052 /*
1053 * Read the bytes at this address.
1054 */
1055 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1056#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1057 size_t cbActual;
1058 if ( PATMIsEnabled(pVM)
1059 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1060 {
1061 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1062 Assert(cbActual > 0);
1063 pIemCpu->cbOpcode = (uint8_t)cbActual;
1064 }
1065 else
1066#endif
1067 {
1068 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1069 if (cbToTryRead > cbLeftOnPage)
1070 cbToTryRead = cbLeftOnPage;
1071 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1072 cbToTryRead = sizeof(pIemCpu->abOpcode);
1073
1074 if (!pIemCpu->fBypassHandlers)
1075 {
1076 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1077 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1078 { /* likely */ }
1079 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1080 {
1081 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1082 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1083 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1084 }
1085 else
1086 {
1087 Log((RT_SUCCESS(rcStrict)
1088 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1089 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1090 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1091 return rcStrict;
1092 }
1093 }
1094 else
1095 {
1096 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1097 if (RT_SUCCESS(rc))
1098 { /* likely */ }
1099 else
1100 {
1101 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1102 GCPtrPC, GCPhys, cbToTryRead, rc));
1103 return rc;
1104 }
1105 }
1106 pIemCpu->cbOpcode = cbToTryRead;
1107 }
1108
1109 return VINF_SUCCESS;
1110}
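/* Worked example (illustrative): for a 16-bit code segment with base 0x10000,
 * limit 0xFFFF and IP 0xFFF0, the code above yields
 *     cbToTryRead = 0xFFFF - 0xFFF0 + 1 = 0x10 bytes left within the limit,
 *     GCPtrPC     = 0x10000 + 0xFFF0   = 0x1FFF0,
 * and the read is then further clipped to what remains of the page and to
 * sizeof(pIemCpu->abOpcode) before the PGM read is issued.
 */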
1111
1112
1113/**
1114 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1115 * exception if it fails.
1116 *
1117 * @returns Strict VBox status code.
1118 * @param pIemCpu The IEM state.
1119 * @param cbMin The minimum number of bytes, relative to offOpcode,
1120 * that must be read.
1121 */
1122IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1123{
1124 /*
1125 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1126 *
1127 * First translate CS:rIP to a physical address.
1128 */
1129 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1130 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1131 uint32_t cbToTryRead;
1132 RTGCPTR GCPtrNext;
1133 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1134 {
1135 cbToTryRead = PAGE_SIZE;
1136 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1137 if (!IEM_IS_CANONICAL(GCPtrNext))
1138 return iemRaiseGeneralProtectionFault0(pIemCpu);
1139 }
1140 else
1141 {
1142 uint32_t GCPtrNext32 = pCtx->eip;
1143 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1144 GCPtrNext32 += pIemCpu->cbOpcode;
1145 if (GCPtrNext32 > pCtx->cs.u32Limit)
1146 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1147 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1148 if (!cbToTryRead) /* overflowed */
1149 {
1150 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1151 cbToTryRead = UINT32_MAX;
1152 /** @todo check out wrapping around the code segment. */
1153 }
1154 if (cbToTryRead < cbMin - cbLeft)
1155 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1156 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1157 }
1158
1159 /* Only read up to the end of the page, and make sure we don't read more
1160 than the opcode buffer can hold. */
1161 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1162 if (cbToTryRead > cbLeftOnPage)
1163 cbToTryRead = cbLeftOnPage;
1164 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1165 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1166/** @todo r=bird: Convert assertion into undefined opcode exception? */
1167 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1168
1169#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1170 /* Allow interpretation of patch manager code blocks since they can for
1171 instance throw #PFs for perfectly good reasons. */
1172 if (pIemCpu->fInPatchCode)
1173 {
1174 size_t cbRead = 0;
1175 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1176 AssertRCReturn(rc, rc);
1177 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1178 return VINF_SUCCESS;
1179 }
1180#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1181
1182 RTGCPHYS GCPhys;
1183 uint64_t fFlags;
1184 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1185 if (RT_FAILURE(rc))
1186 {
1187 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1188 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1189 }
1190 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1191 {
1192 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1193 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1194 }
1195 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1196 {
1197 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1198 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1199 }
1200 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1201 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1202 /** @todo Check reserved bits and such stuff. PGM is better at doing
1203 * that, so do it when implementing the guest virtual address
1204 * TLB... */
1205
1206 /*
1207 * Read the bytes at this address.
1208 *
1209 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1210 * and since PATM should only patch the start of an instruction there
1211 * should be no need to check again here.
1212 */
1213 if (!pIemCpu->fBypassHandlers)
1214 {
1215 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1216 cbToTryRead, PGMACCESSORIGIN_IEM);
1217 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1218 { /* likely */ }
1219 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1220 {
1221 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1222 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1223 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1224 }
1225 else
1226 {
1227 Log((RT_SUCCESS(rcStrict)
1228 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1229 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1230 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1231 return rcStrict;
1232 }
1233 }
1234 else
1235 {
1236 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1237 if (RT_SUCCESS(rc))
1238 { /* likely */ }
1239 else
1240 {
1241 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1242 return rc;
1243 }
1244 }
1245 pIemCpu->cbOpcode += cbToTryRead;
1246 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1247
1248 return VINF_SUCCESS;
1249}
1250
1251
1252/**
1253 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1254 *
1255 * @returns Strict VBox status code.
1256 * @param pIemCpu The IEM state.
1257 * @param pb Where to return the opcode byte.
1258 */
1259DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1260{
1261 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1262 if (rcStrict == VINF_SUCCESS)
1263 {
1264 uint8_t offOpcode = pIemCpu->offOpcode;
1265 *pb = pIemCpu->abOpcode[offOpcode];
1266 pIemCpu->offOpcode = offOpcode + 1;
1267 }
1268 else
1269 *pb = 0;
1270 return rcStrict;
1271}
1272
1273
1274/**
1275 * Fetches the next opcode byte.
1276 *
1277 * @returns Strict VBox status code.
1278 * @param pIemCpu The IEM state.
1279 * @param pu8 Where to return the opcode byte.
1280 */
1281DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1282{
1283 uint8_t const offOpcode = pIemCpu->offOpcode;
1284 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1285 {
1286 *pu8 = pIemCpu->abOpcode[offOpcode];
1287 pIemCpu->offOpcode = offOpcode + 1;
1288 return VINF_SUCCESS;
1289 }
1290 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1291}
1292
1293
1294/**
1295 * Fetches the next opcode byte, returns automatically on failure.
1296 *
1297 * @param a_pu8 Where to return the opcode byte.
1298 * @remark Implicitly references pIemCpu.
1299 */
1300#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1301 do \
1302 { \
1303 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1304 if (rcStrict2 != VINF_SUCCESS) \
1305 return rcStrict2; \
1306 } while (0)
1307
1308
1309/**
1310 * Fetches the next signed byte from the opcode stream.
1311 *
1312 * @returns Strict VBox status code.
1313 * @param pIemCpu The IEM state.
1314 * @param pi8 Where to return the signed byte.
1315 */
1316DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1317{
1318 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1319}
1320
1321
1322/**
1323 * Fetches the next signed byte from the opcode stream, returning automatically
1324 * on failure.
1325 *
1326 * @param a_pi8 Where to return the signed byte.
1327 * @remark Implicitly references pIemCpu.
1328 */
1329#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1330 do \
1331 { \
1332 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1333 if (rcStrict2 != VINF_SUCCESS) \
1334 return rcStrict2; \
1335 } while (0)
1336
1337
1338/**
1339 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1340 *
1341 * @returns Strict VBox status code.
1342 * @param pIemCpu The IEM state.
1343 * @param pu16 Where to return the opcode dword.
1344 */
1345DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1346{
1347 uint8_t u8;
1348 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1349 if (rcStrict == VINF_SUCCESS)
1350 *pu16 = (int8_t)u8;
1351 return rcStrict;
1352}
1353
1354
1355/**
1356 * Fetches the next signed byte from the opcode stream, extending it to
1357 * unsigned 16-bit.
1358 *
1359 * @returns Strict VBox status code.
1360 * @param pIemCpu The IEM state.
1361 * @param pu16 Where to return the unsigned word.
1362 */
1363DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1364{
1365 uint8_t const offOpcode = pIemCpu->offOpcode;
1366 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1367 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1368
1369 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1370 pIemCpu->offOpcode = offOpcode + 1;
1371 return VINF_SUCCESS;
1372}
1373
1374
1375/**
1376 * Fetches the next signed byte from the opcode stream and sign-extending it to
1377 * a word, returning automatically on failure.
1378 *
1379 * @param a_pu16 Where to return the word.
1380 * @remark Implicitly references pIemCpu.
1381 */
1382#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1383 do \
1384 { \
1385 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1386 if (rcStrict2 != VINF_SUCCESS) \
1387 return rcStrict2; \
1388 } while (0)
1389
1390
1391/**
1392 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1393 *
1394 * @returns Strict VBox status code.
1395 * @param pIemCpu The IEM state.
1396 * @param pu32 Where to return the opcode dword.
1397 */
1398DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1399{
1400 uint8_t u8;
1401 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1402 if (rcStrict == VINF_SUCCESS)
1403 *pu32 = (int8_t)u8;
1404 return rcStrict;
1405}
1406
1407
1408/**
1409 * Fetches the next signed byte from the opcode stream, extending it to
1410 * unsigned 32-bit.
1411 *
1412 * @returns Strict VBox status code.
1413 * @param pIemCpu The IEM state.
1414 * @param pu32 Where to return the unsigned dword.
1415 */
1416DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1417{
1418 uint8_t const offOpcode = pIemCpu->offOpcode;
1419 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1420 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1421
1422 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1423 pIemCpu->offOpcode = offOpcode + 1;
1424 return VINF_SUCCESS;
1425}
1426
1427
1428/**
1429 * Fetches the next signed byte from the opcode stream and sign-extending it to
1430 * a double word, returning automatically on failure.
1431 *
1432 * @param a_pu32 Where to return the double word.
1433 * @remark Implicitly references pIemCpu.
1434 */
1435#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1436 do \
1437 { \
1438 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1439 if (rcStrict2 != VINF_SUCCESS) \
1440 return rcStrict2; \
1441 } while (0)
1442
1443
1444/**
1445 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1446 *
1447 * @returns Strict VBox status code.
1448 * @param pIemCpu The IEM state.
1449 * @param pu64 Where to return the opcode qword.
1450 */
1451DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1452{
1453 uint8_t u8;
1454 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1455 if (rcStrict == VINF_SUCCESS)
1456 *pu64 = (int8_t)u8;
1457 return rcStrict;
1458}
1459
1460
1461/**
1462 * Fetches the next signed byte from the opcode stream, extending it to
1463 * unsigned 64-bit.
1464 *
1465 * @returns Strict VBox status code.
1466 * @param pIemCpu The IEM state.
1467 * @param pu64 Where to return the unsigned qword.
1468 */
1469DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1470{
1471 uint8_t const offOpcode = pIemCpu->offOpcode;
1472 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1473 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1474
1475 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1476 pIemCpu->offOpcode = offOpcode + 1;
1477 return VINF_SUCCESS;
1478}
1479
1480
1481/**
1482 * Fetches the next signed byte from the opcode stream and sign-extending it to
1483 * a quad word, returning automatically on failure.
1484 *
1485 * @param a_pu64 Where to return the quad word.
1486 * @remark Implicitly references pIemCpu.
1487 */
1488#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1489 do \
1490 { \
1491 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1492 if (rcStrict2 != VINF_SUCCESS) \
1493 return rcStrict2; \
1494 } while (0)
1495
1496
1497/**
1498 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1499 *
1500 * @returns Strict VBox status code.
1501 * @param pIemCpu The IEM state.
1502 * @param pu16 Where to return the opcode word.
1503 */
1504DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1505{
1506 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1507 if (rcStrict == VINF_SUCCESS)
1508 {
1509 uint8_t offOpcode = pIemCpu->offOpcode;
1510 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1511 pIemCpu->offOpcode = offOpcode + 2;
1512 }
1513 else
1514 *pu16 = 0;
1515 return rcStrict;
1516}
1517
1518
1519/**
1520 * Fetches the next opcode word.
1521 *
1522 * @returns Strict VBox status code.
1523 * @param pIemCpu The IEM state.
1524 * @param pu16 Where to return the opcode word.
1525 */
1526DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1527{
1528 uint8_t const offOpcode = pIemCpu->offOpcode;
1529 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1530 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1531
1532 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1533 pIemCpu->offOpcode = offOpcode + 2;
1534 return VINF_SUCCESS;
1535}
1536
1537
1538/**
1539 * Fetches the next opcode word, returns automatically on failure.
1540 *
1541 * @param a_pu16 Where to return the opcode word.
1542 * @remark Implicitly references pIemCpu.
1543 */
1544#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1545 do \
1546 { \
1547 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1548 if (rcStrict2 != VINF_SUCCESS) \
1549 return rcStrict2; \
1550 } while (0)
1551
1552
1553/**
1554 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1555 *
1556 * @returns Strict VBox status code.
1557 * @param pIemCpu The IEM state.
1558 * @param pu32 Where to return the opcode double word.
1559 */
1560DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1561{
1562 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1563 if (rcStrict == VINF_SUCCESS)
1564 {
1565 uint8_t offOpcode = pIemCpu->offOpcode;
1566 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1567 pIemCpu->offOpcode = offOpcode + 2;
1568 }
1569 else
1570 *pu32 = 0;
1571 return rcStrict;
1572}
1573
1574
1575/**
1576 * Fetches the next opcode word, zero extending it to a double word.
1577 *
1578 * @returns Strict VBox status code.
1579 * @param pIemCpu The IEM state.
1580 * @param pu32 Where to return the opcode double word.
1581 */
1582DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1583{
1584 uint8_t const offOpcode = pIemCpu->offOpcode;
1585 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1586 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1587
1588 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1589 pIemCpu->offOpcode = offOpcode + 2;
1590 return VINF_SUCCESS;
1591}
1592
1593
1594/**
1595 * Fetches the next opcode word and zero extends it to a double word, returns
1596 * automatically on failure.
1597 *
1598 * @param a_pu32 Where to return the opcode double word.
1599 * @remark Implicitly references pIemCpu.
1600 */
1601#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1602 do \
1603 { \
1604 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1605 if (rcStrict2 != VINF_SUCCESS) \
1606 return rcStrict2; \
1607 } while (0)
1608
1609
1610/**
1611 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1612 *
1613 * @returns Strict VBox status code.
1614 * @param pIemCpu The IEM state.
1615 * @param pu64 Where to return the opcode quad word.
1616 */
1617DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1618{
1619 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1620 if (rcStrict == VINF_SUCCESS)
1621 {
1622 uint8_t offOpcode = pIemCpu->offOpcode;
1623 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1624 pIemCpu->offOpcode = offOpcode + 2;
1625 }
1626 else
1627 *pu64 = 0;
1628 return rcStrict;
1629}
1630
1631
1632/**
1633 * Fetches the next opcode word, zero extending it to a quad word.
1634 *
1635 * @returns Strict VBox status code.
1636 * @param pIemCpu The IEM state.
1637 * @param pu64 Where to return the opcode quad word.
1638 */
1639DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1640{
1641 uint8_t const offOpcode = pIemCpu->offOpcode;
1642 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1643 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1644
1645 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1646 pIemCpu->offOpcode = offOpcode + 2;
1647 return VINF_SUCCESS;
1648}
1649
1650
1651/**
1652 * Fetches the next opcode word and zero extends it to a quad word, returns
1653 * automatically on failure.
1654 *
1655 * @param a_pu64 Where to return the opcode quad word.
1656 * @remark Implicitly references pIemCpu.
1657 */
1658#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1659 do \
1660 { \
1661 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1662 if (rcStrict2 != VINF_SUCCESS) \
1663 return rcStrict2; \
1664 } while (0)
1665
1666
1667/**
1668 * Fetches the next signed word from the opcode stream.
1669 *
1670 * @returns Strict VBox status code.
1671 * @param pIemCpu The IEM state.
1672 * @param pi16 Where to return the signed word.
1673 */
1674DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1675{
1676 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1677}
1678
1679
1680/**
1681 * Fetches the next signed word from the opcode stream, returning automatically
1682 * on failure.
1683 *
1684 * @param a_pi16 Where to return the signed word.
1685 * @remark Implicitly references pIemCpu.
1686 */
1687#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1688 do \
1689 { \
1690 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1691 if (rcStrict2 != VINF_SUCCESS) \
1692 return rcStrict2; \
1693 } while (0)
1694
1695
1696/**
1697 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1698 *
1699 * @returns Strict VBox status code.
1700 * @param pIemCpu The IEM state.
1701 * @param pu32 Where to return the opcode dword.
1702 */
1703DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1704{
1705 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1706 if (rcStrict == VINF_SUCCESS)
1707 {
1708 uint8_t offOpcode = pIemCpu->offOpcode;
1709 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1710 pIemCpu->abOpcode[offOpcode + 1],
1711 pIemCpu->abOpcode[offOpcode + 2],
1712 pIemCpu->abOpcode[offOpcode + 3]);
1713 pIemCpu->offOpcode = offOpcode + 4;
1714 }
1715 else
1716 *pu32 = 0;
1717 return rcStrict;
1718}
1719
1720
1721/**
1722 * Fetches the next opcode dword.
1723 *
1724 * @returns Strict VBox status code.
1725 * @param pIemCpu The IEM state.
1726 * @param pu32 Where to return the opcode double word.
1727 */
1728DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1729{
1730 uint8_t const offOpcode = pIemCpu->offOpcode;
1731 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1732 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1733
1734 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1735 pIemCpu->abOpcode[offOpcode + 1],
1736 pIemCpu->abOpcode[offOpcode + 2],
1737 pIemCpu->abOpcode[offOpcode + 3]);
1738 pIemCpu->offOpcode = offOpcode + 4;
1739 return VINF_SUCCESS;
1740}
1741
1742
1743/**
1744 * Fetches the next opcode dword, returns automatically on failure.
1745 *
1746 * @param a_pu32 Where to return the opcode dword.
1747 * @remark Implicitly references pIemCpu.
1748 */
1749#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1750 do \
1751 { \
1752 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1753 if (rcStrict2 != VINF_SUCCESS) \
1754 return rcStrict2; \
1755 } while (0)
1756
1757
1758/**
1759 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1760 *
1761 * @returns Strict VBox status code.
1762 * @param pIemCpu The IEM state.
1763 * @param pu64 Where to return the opcode dword.
1764 */
1765DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1766{
1767 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1768 if (rcStrict == VINF_SUCCESS)
1769 {
1770 uint8_t offOpcode = pIemCpu->offOpcode;
1771 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1772 pIemCpu->abOpcode[offOpcode + 1],
1773 pIemCpu->abOpcode[offOpcode + 2],
1774 pIemCpu->abOpcode[offOpcode + 3]);
1775 pIemCpu->offOpcode = offOpcode + 4;
1776 }
1777 else
1778 *pu64 = 0;
1779 return rcStrict;
1780}
1781
1782
1783/**
1784 * Fetches the next opcode dword, zero extending it to a quad word.
1785 *
1786 * @returns Strict VBox status code.
1787 * @param pIemCpu The IEM state.
1788 * @param pu64 Where to return the opcode quad word.
1789 */
1790DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1791{
1792 uint8_t const offOpcode = pIemCpu->offOpcode;
1793 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1794 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1795
1796 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1797 pIemCpu->abOpcode[offOpcode + 1],
1798 pIemCpu->abOpcode[offOpcode + 2],
1799 pIemCpu->abOpcode[offOpcode + 3]);
1800 pIemCpu->offOpcode = offOpcode + 4;
1801 return VINF_SUCCESS;
1802}
1803
1804
1805/**
1806 * Fetches the next opcode dword and zero extends it to a quad word, returns
1807 * automatically on failure.
1808 *
1809 * @param a_pu64 Where to return the opcode quad word.
1810 * @remark Implicitly references pIemCpu.
1811 */
1812#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1813 do \
1814 { \
1815 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1816 if (rcStrict2 != VINF_SUCCESS) \
1817 return rcStrict2; \
1818 } while (0)
1819
1820
1821/**
1822 * Fetches the next signed double word from the opcode stream.
1823 *
1824 * @returns Strict VBox status code.
1825 * @param pIemCpu The IEM state.
1826 * @param pi32 Where to return the signed double word.
1827 */
1828DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1829{
1830 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1831}
1832
1833/**
1834 * Fetches the next signed double word from the opcode stream, returning
1835 * automatically on failure.
1836 *
1837 * @param a_pi32 Where to return the signed double word.
1838 * @remark Implicitly references pIemCpu.
1839 */
1840#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1841 do \
1842 { \
1843 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1844 if (rcStrict2 != VINF_SUCCESS) \
1845 return rcStrict2; \
1846 } while (0)
1847
1848
1849/**
1850 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1851 *
1852 * @returns Strict VBox status code.
1853 * @param pIemCpu The IEM state.
1854 * @param pu64 Where to return the opcode qword.
1855 */
1856DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1857{
1858 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1859 if (rcStrict == VINF_SUCCESS)
1860 {
1861 uint8_t offOpcode = pIemCpu->offOpcode;
1862 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1863 pIemCpu->abOpcode[offOpcode + 1],
1864 pIemCpu->abOpcode[offOpcode + 2],
1865 pIemCpu->abOpcode[offOpcode + 3]);
1866 pIemCpu->offOpcode = offOpcode + 4;
1867 }
1868 else
1869 *pu64 = 0;
1870 return rcStrict;
1871}
1872
1873
1874/**
1875 * Fetches the next opcode dword, sign extending it into a quad word.
1876 *
1877 * @returns Strict VBox status code.
1878 * @param pIemCpu The IEM state.
1879 * @param pu64 Where to return the opcode quad word.
1880 */
1881DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1882{
1883 uint8_t const offOpcode = pIemCpu->offOpcode;
1884 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1885 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1886
1887 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1888 pIemCpu->abOpcode[offOpcode + 1],
1889 pIemCpu->abOpcode[offOpcode + 2],
1890 pIemCpu->abOpcode[offOpcode + 3]);
1891 *pu64 = i32;
1892 pIemCpu->offOpcode = offOpcode + 4;
1893 return VINF_SUCCESS;
1894}
1895
1896
1897/**
1898 * Fetches the next opcode double word and sign extends it to a quad word,
1899 * returns automatically on failure.
1900 *
1901 * @param a_pu64 Where to return the opcode quad word.
1902 * @remark Implicitly references pIemCpu.
1903 */
1904#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1905 do \
1906 { \
1907 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1908 if (rcStrict2 != VINF_SUCCESS) \
1909 return rcStrict2; \
1910 } while (0)
1911
1912
1913/**
1914 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1915 *
1916 * @returns Strict VBox status code.
1917 * @param pIemCpu The IEM state.
1918 * @param pu64 Where to return the opcode qword.
1919 */
1920DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1921{
1922 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1923 if (rcStrict == VINF_SUCCESS)
1924 {
1925 uint8_t offOpcode = pIemCpu->offOpcode;
1926 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1927 pIemCpu->abOpcode[offOpcode + 1],
1928 pIemCpu->abOpcode[offOpcode + 2],
1929 pIemCpu->abOpcode[offOpcode + 3],
1930 pIemCpu->abOpcode[offOpcode + 4],
1931 pIemCpu->abOpcode[offOpcode + 5],
1932 pIemCpu->abOpcode[offOpcode + 6],
1933 pIemCpu->abOpcode[offOpcode + 7]);
1934 pIemCpu->offOpcode = offOpcode + 8;
1935 }
1936 else
1937 *pu64 = 0;
1938 return rcStrict;
1939}
1940
1941
1942/**
1943 * Fetches the next opcode qword.
1944 *
1945 * @returns Strict VBox status code.
1946 * @param pIemCpu The IEM state.
1947 * @param pu64 Where to return the opcode qword.
1948 */
1949DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1950{
1951 uint8_t const offOpcode = pIemCpu->offOpcode;
1952 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1953 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1954
1955 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1956 pIemCpu->abOpcode[offOpcode + 1],
1957 pIemCpu->abOpcode[offOpcode + 2],
1958 pIemCpu->abOpcode[offOpcode + 3],
1959 pIemCpu->abOpcode[offOpcode + 4],
1960 pIemCpu->abOpcode[offOpcode + 5],
1961 pIemCpu->abOpcode[offOpcode + 6],
1962 pIemCpu->abOpcode[offOpcode + 7]);
1963 pIemCpu->offOpcode = offOpcode + 8;
1964 return VINF_SUCCESS;
1965}
1966
1967
1968/**
1969 * Fetches the next opcode quad word, returns automatically on failure.
1970 *
1971 * @param a_pu64 Where to return the opcode quad word.
1972 * @remark Implicitly references pIemCpu.
1973 */
1974#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1975 do \
1976 { \
1977 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1978 if (rcStrict2 != VINF_SUCCESS) \
1979 return rcStrict2; \
1980 } while (0)
1981
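/*
 * Illustrative sketch of how the fetch macros above are typically consumed;
 * the function below is hypothetical and not part of the decoder tables.
 * Each macro either advances the opcode buffer or returns the strict status
 * code of the failed fetch straight to the caller, so decoding code needs no
 * explicit error plumbing.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemOpExampleDecodeImmediates(PIEMCPU pIemCpu)
{
    uint16_t u16Imm;
    IEM_OPCODE_GET_NEXT_U16(&u16Imm);   /* a 16-bit immediate */
    uint32_t u32Imm;
    IEM_OPCODE_GET_NEXT_U32(&u32Imm);   /* a 32-bit immediate */
    /* ... act on u16Imm and u32Imm ... */
    return VINF_SUCCESS;
}
#endif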
1982
1983/** @name Misc Worker Functions.
1984 * @{
1985 */
1986
1987
1988/**
1989 * Validates a new SS segment.
1990 *
1991 * @returns VBox strict status code.
1992 * @param pIemCpu The IEM per CPU instance data.
1993 * @param pCtx The CPU context.
1994 * @param NewSS The new SS selector.
1995 * @param uCpl The CPL to load the stack for.
1996 * @param pDesc Where to return the descriptor.
1997 */
1998IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1999{
2000 NOREF(pCtx);
2001
2002 /* Null selectors are not allowed (we're not called for dispatching
2003 interrupts with SS=0 in long mode). */
2004 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2005 {
2006 Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
2007 return iemRaiseTaskSwitchFault0(pIemCpu);
2008 }
2009
2010 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2011 if ((NewSS & X86_SEL_RPL) != uCpl)
2012 {
2013 Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
2014 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2015 }
2016
2017 /*
2018 * Read the descriptor.
2019 */
2020 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
2021 if (rcStrict != VINF_SUCCESS)
2022 return rcStrict;
2023
2024 /*
2025 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2026 */
2027 if (!pDesc->Legacy.Gen.u1DescType)
2028 {
2029 Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2030 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2031 }
2032
2033 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2034 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2035 {
2036 Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2037 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2038 }
2039 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2040 {
2041 Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2042 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2043 }
2044
2045 /* Is it there? */
2046 /** @todo testcase: Is this checked before the canonical / limit check below? */
2047 if (!pDesc->Legacy.Gen.u1Present)
2048 {
2049 Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
2050 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2051 }
2052
2053 return VINF_SUCCESS;
2054}
2055
2056
2057/**
2058 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2059 * not.
2060 *
2061 * @param a_pIemCpu The IEM per CPU data.
2062 * @param a_pCtx The CPU context.
2063 */
2064#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2065# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2066 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2067 ? (a_pCtx)->eflags.u \
2068 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2069#else
2070# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2071 ( (a_pCtx)->eflags.u )
2072#endif
2073
2074/**
2075 * Updates the EFLAGS in the correct manner wrt. PATM.
2076 *
2077 * @param a_pIemCpu The IEM per CPU data.
2078 * @param a_pCtx The CPU context.
2079 * @param a_fEfl The new EFLAGS.
2080 */
2081#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2082# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2083 do { \
2084 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2085 (a_pCtx)->eflags.u = (a_fEfl); \
2086 else \
2087 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2088 } while (0)
2089#else
2090# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2091 do { \
2092 (a_pCtx)->eflags.u = (a_fEfl); \
2093 } while (0)
2094#endif
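/*
 * Illustrative sketch (hypothetical snippet): a read-modify-write of the guest
 * EFLAGS should go through the two macros above, so that in raw-mode builds
 * the bits PATM keeps elsewhere are read and written back via CPUM rather than
 * straight from the context structure.
 */
#if 0 /* example fragment only */
    uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
    fEfl &= ~X86_EFL_IF;                    /* e.g. CLI style interrupt masking */
    IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
#endif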
2095
2096
2097/** @} */
2098
2099/** @name Raising Exceptions.
2100 *
2101 * @{
2102 */
2103
2104/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2105 * @{ */
2106/** CPU exception. */
2107#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2108/** External interrupt (from PIC, APIC, whatever). */
2109#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2110/** Software interrupt (int or into, not bound).
2111 * Returns to the following instruction */
2112#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2113/** Takes an error code. */
2114#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2115/** Takes a CR2. */
2116#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2117/** Generated by the breakpoint instruction. */
2118#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2119/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2120#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2121/** @} */
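/*
 * Illustrative flag combinations (sketch only): a page-fault style CPU
 * exception would carry both an error code and a CR2 value, while an INT xx
 * instruction is a software interrupt taking neither.
 */
#if 0
    uint32_t const fPageFaultStyle = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2;
    uint32_t const fIntXxStyle     = IEM_XCPT_FLAGS_T_SOFT_INT;
#endif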
2122
2123
2124/**
2125 * Loads the specified stack far pointer from the TSS.
2126 *
2127 * @returns VBox strict status code.
2128 * @param pIemCpu The IEM per CPU instance data.
2129 * @param pCtx The CPU context.
2130 * @param uCpl The CPL to load the stack for.
2131 * @param pSelSS Where to return the new stack segment.
2132 * @param puEsp Where to return the new stack pointer.
2133 */
2134IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2135 PRTSEL pSelSS, uint32_t *puEsp)
2136{
2137 VBOXSTRICTRC rcStrict;
2138 Assert(uCpl < 4);
2139
2140 switch (pCtx->tr.Attr.n.u4Type)
2141 {
2142 /*
2143 * 16-bit TSS (X86TSS16).
2144 */
2145 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2146 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2147 {
2148 uint32_t off = uCpl * 4 + 2;
2149 if (off + 4 <= pCtx->tr.u32Limit)
2150 {
2151 /** @todo check actual access pattern here. */
2152 uint32_t u32Tmp = 0; /* gcc maybe... */
2153 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2154 if (rcStrict == VINF_SUCCESS)
2155 {
2156 *puEsp = RT_LOWORD(u32Tmp);
2157 *pSelSS = RT_HIWORD(u32Tmp);
2158 return VINF_SUCCESS;
2159 }
2160 }
2161 else
2162 {
2163 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2164 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2165 }
2166 break;
2167 }
2168
2169 /*
2170 * 32-bit TSS (X86TSS32).
2171 */
2172 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2173 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2174 {
2175 uint32_t off = uCpl * 8 + 4;
2176 if (off + 7 <= pCtx->tr.u32Limit)
2177 {
2178/** @todo check actual access pattern here. */
2179 uint64_t u64Tmp;
2180 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2181 if (rcStrict == VINF_SUCCESS)
2182 {
2183 *puEsp = u64Tmp & UINT32_MAX;
2184 *pSelSS = (RTSEL)(u64Tmp >> 32);
2185 return VINF_SUCCESS;
2186 }
2187 }
2188 else
2189 {
2190 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2191 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2192 }
2193 break;
2194 }
2195
2196 default:
2197 AssertFailed();
2198 rcStrict = VERR_IEM_IPE_4;
2199 break;
2200 }
2201
2202 *puEsp = 0; /* make gcc happy */
2203 *pSelSS = 0; /* make gcc happy */
2204 return rcStrict;
2205}
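/*
 * Worked example of the offset math above, assuming the standard TSS layouts:
 * for uCpl=1 the 16-bit path reads the {sp1,ss1} word pair at offset
 * 1*4 + 2 = 6, while the 32-bit path reads the {esp1,ss1} pair at offset
 * 1*8 + 4 = 0x0c.  In both cases the low half of the fetched value is the new
 * stack pointer and the high half holds the new stack selector.
 */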
2206
2207
2208/**
2209 * Loads the specified stack pointer from the 64-bit TSS.
2210 *
2211 * @returns VBox strict status code.
2212 * @param pIemCpu The IEM per CPU instance data.
2213 * @param pCtx The CPU context.
2214 * @param uCpl The CPL to load the stack for.
2215 * @param uIst The interrupt stack table index, or 0 to use the uCpl based entry.
2216 * @param puRsp Where to return the new stack pointer.
2217 */
2218IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2219{
2220 Assert(uCpl < 4);
2221 Assert(uIst < 8);
2222 *puRsp = 0; /* make gcc happy */
2223
2224 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2225
2226 uint32_t off;
2227 if (uIst)
2228 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2229 else
2230 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2231 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2232 {
2233 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2234 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2235 }
2236
2237 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2238}
2239
2240
2241/**
2242 * Adjust the CPU state according to the exception being raised.
2243 *
2244 * @param pCtx The CPU context.
2245 * @param u8Vector The exception that has been raised.
2246 */
2247DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2248{
2249 switch (u8Vector)
2250 {
2251 case X86_XCPT_DB:
2252 pCtx->dr[7] &= ~X86_DR7_GD;
2253 break;
2254 /** @todo Read the AMD and Intel exception reference... */
2255 }
2256}
2257
2258
2259/**
2260 * Implements exceptions and interrupts for real mode.
2261 *
2262 * @returns VBox strict status code.
2263 * @param pIemCpu The IEM per CPU instance data.
2264 * @param pCtx The CPU context.
2265 * @param cbInstr The number of bytes to offset rIP by in the return
2266 * address.
2267 * @param u8Vector The interrupt / exception vector number.
2268 * @param fFlags The flags.
2269 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2270 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2271 */
2272IEM_STATIC VBOXSTRICTRC
2273iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2274 PCPUMCTX pCtx,
2275 uint8_t cbInstr,
2276 uint8_t u8Vector,
2277 uint32_t fFlags,
2278 uint16_t uErr,
2279 uint64_t uCr2)
2280{
2281 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2282 NOREF(uErr); NOREF(uCr2);
2283
2284 /*
2285 * Read the IDT entry.
2286 */
2287 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2288 {
2289 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2290 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2291 }
2292 RTFAR16 Idte;
2293 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2294 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2295 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2296 return rcStrict;
2297
2298 /*
2299 * Push the stack frame.
2300 */
2301 uint16_t *pu16Frame;
2302 uint64_t uNewRsp;
2303 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2304 if (rcStrict != VINF_SUCCESS)
2305 return rcStrict;
2306
2307 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2308#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2309 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2310 if (pIemCpu->uTargetCpu <= IEMTARGETCPU_186)
2311 fEfl |= UINT16_C(0xf000);
2312#endif
2313 pu16Frame[2] = (uint16_t)fEfl;
2314 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2315 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2316 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2317 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2318 return rcStrict;
2319
2320 /*
2321 * Load the vector address into cs:ip and make exception specific state
2322 * adjustments.
2323 */
2324 pCtx->cs.Sel = Idte.sel;
2325 pCtx->cs.ValidSel = Idte.sel;
2326 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2327 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2328 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2329 pCtx->rip = Idte.off;
2330 fEfl &= ~X86_EFL_IF;
2331 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2332
2333 /** @todo do we actually do this in real mode? */
2334 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2335 iemRaiseXcptAdjustState(pCtx, u8Vector);
2336
2337 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2338}
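/*
 * Worked example of the real-mode dispatch above (example vector only): for
 * u8Vector=0x21 the IVT entry is read from idtr.pIdt + 4*0x21 = +0x84 as an
 * off:sel far pointer, FLAGS, CS and the return IP are pushed as three words,
 * IF is cleared, and execution resumes at sel:off with cs.u64Base = sel << 4.
 */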
2339
2340
2341/**
2342 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2343 *
2344 * @param pIemCpu The IEM per CPU instance data.
2345 * @param pSReg Pointer to the segment register.
2346 */
2347IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2348{
2349 pSReg->Sel = 0;
2350 pSReg->ValidSel = 0;
2351 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2352 {
2353 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2354 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2355 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2356 }
2357 else
2358 {
2359 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2360 /** @todo check this on AMD-V */
2361 pSReg->u64Base = 0;
2362 pSReg->u32Limit = 0;
2363 }
2364}
2365
2366
2367/**
2368 * Loads a segment selector during a task switch in V8086 mode.
2369 *
2370 * @param pIemCpu The IEM per CPU instance data.
2371 * @param pSReg Pointer to the segment register.
2372 * @param uSel The selector value to load.
2373 */
2374IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2375{
2376 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2377 pSReg->Sel = uSel;
2378 pSReg->ValidSel = uSel;
2379 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2380 pSReg->u64Base = uSel << 4;
2381 pSReg->u32Limit = 0xffff;
2382 pSReg->Attr.u = 0xf3;
2383}
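/*
 * Worked example of the V8086 load above: uSel=0x1234 yields u64Base=0x12340
 * (selector << 4), u32Limit=0xffff and Attr.u=0xf3, i.e. a present, accessed,
 * read/write data segment with DPL 3, which matches the segment state
 * documented for virtual-8086 mode.
 */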
2384
2385
2386/**
2387 * Loads a NULL data selector into a selector register, both the hidden and
2388 * visible parts, in protected mode.
2389 *
2390 * @param pIemCpu The IEM state of the calling EMT.
2391 * @param pSReg Pointer to the segment register.
2392 * @param uRpl The RPL.
2393 */
2394IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2395{
2396 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2397 * data selector in protected mode. */
2398 pSReg->Sel = uRpl;
2399 pSReg->ValidSel = uRpl;
2400 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2401 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2402 {
2403 /* VT-x (Intel 3960x) observed doing something like this. */
2404 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2405 pSReg->u32Limit = UINT32_MAX;
2406 pSReg->u64Base = 0;
2407 }
2408 else
2409 {
2410 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2411 pSReg->u32Limit = 0;
2412 pSReg->u64Base = 0;
2413 }
2414}
2415
2416
2417/**
2418 * Loads a segment selector during a task switch in protected mode.
2419 *
2420 * In this task switch scenario, we would throw \#TS exceptions rather than
2421 * \#GPs.
2422 *
2423 * @returns VBox strict status code.
2424 * @param pIemCpu The IEM per CPU instance data.
2425 * @param pSReg Pointer to the segment register.
2426 * @param uSel The new selector value.
2427 *
2428 * @remarks This does _not_ handle CS or SS.
2429 * @remarks This expects pIemCpu->uCpl to be up to date.
2430 */
2431IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2432{
2433 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2434
2435 /* Null data selector. */
2436 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2437 {
2438 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2439 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2440 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2441 return VINF_SUCCESS;
2442 }
2443
2444 /* Fetch the descriptor. */
2445 IEMSELDESC Desc;
2446 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2447 if (rcStrict != VINF_SUCCESS)
2448 {
2449 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2450 VBOXSTRICTRC_VAL(rcStrict)));
2451 return rcStrict;
2452 }
2453
2454 /* Must be a data segment or readable code segment. */
2455 if ( !Desc.Legacy.Gen.u1DescType
2456 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2457 {
2458 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2459 Desc.Legacy.Gen.u4Type));
2460 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2461 }
2462
2463 /* Check privileges for data segments and non-conforming code segments. */
2464 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2465 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2466 {
2467 /* The RPL and the new CPL must be less than or equal to the DPL. */
2468 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2469 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2470 {
2471 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2472 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2473 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2474 }
2475 }
2476
2477 /* Is it there? */
2478 if (!Desc.Legacy.Gen.u1Present)
2479 {
2480 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2481 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2482 }
2483
2484 /* The base and limit. */
2485 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2486 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2487
2488 /*
2489 * Ok, everything checked out fine. Now set the accessed bit before
2490 * committing the result into the registers.
2491 */
2492 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2493 {
2494 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2495 if (rcStrict != VINF_SUCCESS)
2496 return rcStrict;
2497 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2498 }
2499
2500 /* Commit */
2501 pSReg->Sel = uSel;
2502 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2503 pSReg->u32Limit = cbLimit;
2504 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2505 pSReg->ValidSel = uSel;
2506 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2507 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2508 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2509
2510 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2511 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2512 return VINF_SUCCESS;
2513}
2514
2515
2516/**
2517 * Performs a task switch.
2518 *
2519 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2520 * caller is responsible for performing the necessary checks (like DPL, TSS
2521 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2522 * reference for JMP, CALL, IRET.
2523 *
2524 * If the task switch is due to a software interrupt or hardware exception,
2525 * the caller is responsible for validating the TSS selector and descriptor. See
2526 * Intel Instruction reference for INT n.
2527 *
2528 * @returns VBox strict status code.
2529 * @param pIemCpu The IEM per CPU instance data.
2530 * @param pCtx The CPU context.
2531 * @param enmTaskSwitch What caused this task switch.
2532 * @param uNextEip The EIP effective after the task switch.
2533 * @param fFlags The flags.
2534 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2535 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2536 * @param SelTSS The TSS selector of the new task.
2537 * @param pNewDescTSS Pointer to the new TSS descriptor.
2538 */
2539IEM_STATIC VBOXSTRICTRC
2540iemTaskSwitch(PIEMCPU pIemCpu,
2541 PCPUMCTX pCtx,
2542 IEMTASKSWITCH enmTaskSwitch,
2543 uint32_t uNextEip,
2544 uint32_t fFlags,
2545 uint16_t uErr,
2546 uint64_t uCr2,
2547 RTSEL SelTSS,
2548 PIEMSELDESC pNewDescTSS)
2549{
2550 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2551 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2552
2553 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2554 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2555 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2556 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2557 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2558
2559 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2560 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2561
2562 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2563 fIsNewTSS386, pCtx->eip, uNextEip));
2564
2565 /* Update CR2 in case it's a page-fault. */
2566 /** @todo This should probably be done much earlier in IEM/PGM. See
2567 * @bugref{5653#c49}. */
2568 if (fFlags & IEM_XCPT_FLAGS_CR2)
2569 pCtx->cr2 = uCr2;
2570
2571 /*
2572 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2573 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2574 */
2575 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2576 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2577 if (uNewTSSLimit < uNewTSSLimitMin)
2578 {
2579 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2580 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2581 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2582 }
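    /*
     * In concrete numbers, assuming the standard structure sizes: a 32-bit TSS
     * must report a limit of at least 0x67 (104 bytes) and a 16-bit TSS a limit
     * of at least 0x2b (44 bytes), otherwise the #TS above is raised before
     * anything is read from the new TSS.
     */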
2583
2584 /*
2585 * Check the current TSS limit. The last write to the current TSS during the
2586 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2587 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2588 *
2589 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2590 * end up with smaller than "legal" TSS limits.
2591 */
2592 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2593 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2594 if (uCurTSSLimit < uCurTSSLimitMin)
2595 {
2596 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2597 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2598 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2599 }
2600
2601 /*
2602 * Verify that the new TSS can be accessed and map it. Map only the required contents
2603 * and not the entire TSS.
2604 */
2605 void *pvNewTSS;
2606 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2607 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2608 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2609 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2610 * not perform correct translation if this happens. See Intel spec. 7.2.1
2611 * "Task-State Segment" */
2612 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2613 if (rcStrict != VINF_SUCCESS)
2614 {
2615 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2616 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2617 return rcStrict;
2618 }
2619
2620 /*
2621 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2622 */
2623 uint32_t u32EFlags = pCtx->eflags.u32;
2624 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2625 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2626 {
2627 PX86DESC pDescCurTSS;
2628 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2629 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2630 if (rcStrict != VINF_SUCCESS)
2631 {
2632 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2633 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2634 return rcStrict;
2635 }
2636
2637 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2638 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2639 if (rcStrict != VINF_SUCCESS)
2640 {
2641 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2642 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2643 return rcStrict;
2644 }
2645
2646 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2647 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2648 {
2649 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2650 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2651 u32EFlags &= ~X86_EFL_NT;
2652 }
2653 }
2654
2655 /*
2656 * Save the CPU state into the current TSS.
2657 */
2658 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2659 if (GCPtrNewTSS == GCPtrCurTSS)
2660 {
2661 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2662 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2663 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2664 }
2665 if (fIsNewTSS386)
2666 {
2667 /*
2668 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2669 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2670 */
2671 void *pvCurTSS32;
2672 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2673 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2674 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2675 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2676 if (rcStrict != VINF_SUCCESS)
2677 {
2678 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2679 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2680 return rcStrict;
2681 }
2682
2683 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2684 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2685 pCurTSS32->eip = uNextEip;
2686 pCurTSS32->eflags = u32EFlags;
2687 pCurTSS32->eax = pCtx->eax;
2688 pCurTSS32->ecx = pCtx->ecx;
2689 pCurTSS32->edx = pCtx->edx;
2690 pCurTSS32->ebx = pCtx->ebx;
2691 pCurTSS32->esp = pCtx->esp;
2692 pCurTSS32->ebp = pCtx->ebp;
2693 pCurTSS32->esi = pCtx->esi;
2694 pCurTSS32->edi = pCtx->edi;
2695 pCurTSS32->es = pCtx->es.Sel;
2696 pCurTSS32->cs = pCtx->cs.Sel;
2697 pCurTSS32->ss = pCtx->ss.Sel;
2698 pCurTSS32->ds = pCtx->ds.Sel;
2699 pCurTSS32->fs = pCtx->fs.Sel;
2700 pCurTSS32->gs = pCtx->gs.Sel;
2701
2702 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2703 if (rcStrict != VINF_SUCCESS)
2704 {
2705 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2706 VBOXSTRICTRC_VAL(rcStrict)));
2707 return rcStrict;
2708 }
2709 }
2710 else
2711 {
2712 /*
2713 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2714 */
2715 void *pvCurTSS16;
2716 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2717 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2718 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2719 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2720 if (rcStrict != VINF_SUCCESS)
2721 {
2722 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2723 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2724 return rcStrict;
2725 }
2726
2727 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2728 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2729 pCurTSS16->ip = uNextEip;
2730 pCurTSS16->flags = u32EFlags;
2731 pCurTSS16->ax = pCtx->ax;
2732 pCurTSS16->cx = pCtx->cx;
2733 pCurTSS16->dx = pCtx->dx;
2734 pCurTSS16->bx = pCtx->bx;
2735 pCurTSS16->sp = pCtx->sp;
2736 pCurTSS16->bp = pCtx->bp;
2737 pCurTSS16->si = pCtx->si;
2738 pCurTSS16->di = pCtx->di;
2739 pCurTSS16->es = pCtx->es.Sel;
2740 pCurTSS16->cs = pCtx->cs.Sel;
2741 pCurTSS16->ss = pCtx->ss.Sel;
2742 pCurTSS16->ds = pCtx->ds.Sel;
2743
2744 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2745 if (rcStrict != VINF_SUCCESS)
2746 {
2747 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2748 VBOXSTRICTRC_VAL(rcStrict)));
2749 return rcStrict;
2750 }
2751 }
2752
2753 /*
2754 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2755 */
2756 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2757 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2758 {
2759 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2760 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2761 pNewTSS->selPrev = pCtx->tr.Sel;
2762 }
2763
2764 /*
2765 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2766 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2767 */
2768 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2769 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2770 bool fNewDebugTrap;
2771 if (fIsNewTSS386)
2772 {
2773 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2774 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2775 uNewEip = pNewTSS32->eip;
2776 uNewEflags = pNewTSS32->eflags;
2777 uNewEax = pNewTSS32->eax;
2778 uNewEcx = pNewTSS32->ecx;
2779 uNewEdx = pNewTSS32->edx;
2780 uNewEbx = pNewTSS32->ebx;
2781 uNewEsp = pNewTSS32->esp;
2782 uNewEbp = pNewTSS32->ebp;
2783 uNewEsi = pNewTSS32->esi;
2784 uNewEdi = pNewTSS32->edi;
2785 uNewES = pNewTSS32->es;
2786 uNewCS = pNewTSS32->cs;
2787 uNewSS = pNewTSS32->ss;
2788 uNewDS = pNewTSS32->ds;
2789 uNewFS = pNewTSS32->fs;
2790 uNewGS = pNewTSS32->gs;
2791 uNewLdt = pNewTSS32->selLdt;
2792 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2793 }
2794 else
2795 {
2796 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2797 uNewCr3 = 0;
2798 uNewEip = pNewTSS16->ip;
2799 uNewEflags = pNewTSS16->flags;
2800 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2801 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2802 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2803 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2804 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2805 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2806 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2807 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2808 uNewES = pNewTSS16->es;
2809 uNewCS = pNewTSS16->cs;
2810 uNewSS = pNewTSS16->ss;
2811 uNewDS = pNewTSS16->ds;
2812 uNewFS = 0;
2813 uNewGS = 0;
2814 uNewLdt = pNewTSS16->selLdt;
2815 fNewDebugTrap = false;
2816 }
2817
2818 if (GCPtrNewTSS == GCPtrCurTSS)
2819 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2820 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2821
2822 /*
2823 * We're done accessing the new TSS.
2824 */
2825 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2826 if (rcStrict != VINF_SUCCESS)
2827 {
2828 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2829 return rcStrict;
2830 }
2831
2832 /*
2833 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2834 */
2835 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2836 {
2837 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2838 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2839 if (rcStrict != VINF_SUCCESS)
2840 {
2841 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2842 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2843 return rcStrict;
2844 }
2845
2846 /* Check that the descriptor indicates the new TSS is available (not busy). */
2847 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2848 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2849 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2850
2851 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2852 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2853 if (rcStrict != VINF_SUCCESS)
2854 {
2855 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2856 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2857 return rcStrict;
2858 }
2859 }
2860
2861 /*
2862 * From this point on, we're technically in the new task. Exceptions raised from here on are
2863 * deferred until the task switch has completed, but still hit before any instruction of the new task executes.
2864 */
2865 pCtx->tr.Sel = SelTSS;
2866 pCtx->tr.ValidSel = SelTSS;
2867 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2868 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2869 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2870 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2871 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2872
2873 /* Set the busy bit in TR. */
2874 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2875 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2876 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2877 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2878 {
2879 uNewEflags |= X86_EFL_NT;
2880 }
2881
2882 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2883 pCtx->cr0 |= X86_CR0_TS;
2884 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2885
2886 pCtx->eip = uNewEip;
2887 pCtx->eax = uNewEax;
2888 pCtx->ecx = uNewEcx;
2889 pCtx->edx = uNewEdx;
2890 pCtx->ebx = uNewEbx;
2891 pCtx->esp = uNewEsp;
2892 pCtx->ebp = uNewEbp;
2893 pCtx->esi = uNewEsi;
2894 pCtx->edi = uNewEdi;
2895
2896 uNewEflags &= X86_EFL_LIVE_MASK;
2897 uNewEflags |= X86_EFL_RA1_MASK;
2898 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2899
2900 /*
2901 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2902 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2903 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2904 */
2905 pCtx->es.Sel = uNewES;
2906 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2907 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2908
2909 pCtx->cs.Sel = uNewCS;
2910 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2911 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2912
2913 pCtx->ss.Sel = uNewSS;
2914 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2915 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2916
2917 pCtx->ds.Sel = uNewDS;
2918 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2919 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2920
2921 pCtx->fs.Sel = uNewFS;
2922 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2923 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2924
2925 pCtx->gs.Sel = uNewGS;
2926 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2927 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2928 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2929
2930 pCtx->ldtr.Sel = uNewLdt;
2931 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2932 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2933 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2934
2935 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2936 {
2937 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2938 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2939 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2940 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2941 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2942 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2943 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2944 }
2945
2946 /*
2947 * Switch CR3 for the new task.
2948 */
2949 if ( fIsNewTSS386
2950 && (pCtx->cr0 & X86_CR0_PG))
2951 {
2952 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2953 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2954 {
2955 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2956 AssertRCSuccessReturn(rc, rc);
2957 }
2958 else
2959 pCtx->cr3 = uNewCr3;
2960
2961 /* Inform PGM. */
2962 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2963 {
2964 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2965 AssertRCReturn(rc, rc);
2966 /* ignore informational status codes */
2967 }
2968 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2969 }
2970
2971 /*
2972 * Switch LDTR for the new task.
2973 */
2974 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2975 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2976 else
2977 {
2978 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2979
2980 IEMSELDESC DescNewLdt;
2981 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2982 if (rcStrict != VINF_SUCCESS)
2983 {
2984 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2985 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2986 return rcStrict;
2987 }
2988 if ( !DescNewLdt.Legacy.Gen.u1Present
2989 || DescNewLdt.Legacy.Gen.u1DescType
2990 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2991 {
2992 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2993 uNewLdt, DescNewLdt.Legacy.u));
2994 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2995 }
2996
2997 pCtx->ldtr.ValidSel = uNewLdt;
2998 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2999 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3000 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3001 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3002 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3003 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3004 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
3005 }
3006
3007 IEMSELDESC DescSS;
3008 if (IEM_IS_V86_MODE(pIemCpu))
3009 {
3010 pIemCpu->uCpl = 3;
3011 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
3012 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
3013 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
3014 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
3015 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
3016 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
3017 }
3018 else
3019 {
3020 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3021
3022 /*
3023 * Load the stack segment for the new task.
3024 */
3025 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3026 {
3027 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3028 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3029 }
3030
3031 /* Fetch the descriptor. */
3032 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
3033 if (rcStrict != VINF_SUCCESS)
3034 {
3035 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3036 VBOXSTRICTRC_VAL(rcStrict)));
3037 return rcStrict;
3038 }
3039
3040 /* SS must be a data segment and writable. */
3041 if ( !DescSS.Legacy.Gen.u1DescType
3042 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3043 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3044 {
3045 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3046 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3047 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3048 }
3049
3050 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3051 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3052 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3053 {
3054 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3055 uNewCpl));
3056 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3057 }
3058
3059 /* Is it there? */
3060 if (!DescSS.Legacy.Gen.u1Present)
3061 {
3062 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3063 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3064 }
3065
3066 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3067 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3068
3069 /* Set the accessed bit before committing the result into SS. */
3070 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3071 {
3072 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3073 if (rcStrict != VINF_SUCCESS)
3074 return rcStrict;
3075 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3076 }
3077
3078 /* Commit SS. */
3079 pCtx->ss.Sel = uNewSS;
3080 pCtx->ss.ValidSel = uNewSS;
3081 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3082 pCtx->ss.u32Limit = cbLimit;
3083 pCtx->ss.u64Base = u64Base;
3084 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3085 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3086
3087 /* CPL has changed, update IEM before loading rest of segments. */
3088 pIemCpu->uCpl = uNewCpl;
3089
3090 /*
3091 * Load the data segments for the new task.
3092 */
3093 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3094 if (rcStrict != VINF_SUCCESS)
3095 return rcStrict;
3096 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3097 if (rcStrict != VINF_SUCCESS)
3098 return rcStrict;
3099 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3100 if (rcStrict != VINF_SUCCESS)
3101 return rcStrict;
3102 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3103 if (rcStrict != VINF_SUCCESS)
3104 return rcStrict;
3105
3106 /*
3107 * Load the code segment for the new task.
3108 */
3109 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3110 {
3111 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3112 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3113 }
3114
3115 /* Fetch the descriptor. */
3116 IEMSELDESC DescCS;
3117 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3118 if (rcStrict != VINF_SUCCESS)
3119 {
3120 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3121 return rcStrict;
3122 }
3123
3124 /* CS must be a code segment. */
3125 if ( !DescCS.Legacy.Gen.u1DescType
3126 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3127 {
3128 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3129 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3130 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3131 }
3132
3133 /* For conforming CS, DPL must be less than or equal to the RPL. */
3134 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3135 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3136 {
3137                 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3138 DescCS.Legacy.Gen.u2Dpl));
3139 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3140 }
3141
3142 /* For non-conforming CS, DPL must match RPL. */
3143 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3144 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3145 {
3146         Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3147 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3148 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3149 }
3150
3151 /* Is it there? */
3152 if (!DescCS.Legacy.Gen.u1Present)
3153 {
3154 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3155 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3156 }
3157
3158 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3159 u64Base = X86DESC_BASE(&DescCS.Legacy);
3160
3161 /* Set the accessed bit before committing the result into CS. */
3162 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3163 {
3164 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3165 if (rcStrict != VINF_SUCCESS)
3166 return rcStrict;
3167 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3168 }
3169
3170 /* Commit CS. */
3171 pCtx->cs.Sel = uNewCS;
3172 pCtx->cs.ValidSel = uNewCS;
3173 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3174 pCtx->cs.u32Limit = cbLimit;
3175 pCtx->cs.u64Base = u64Base;
3176 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3177 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3178 }
3179
3180 /** @todo Debug trap. */
3181 if (fIsNewTSS386 && fNewDebugTrap)
3182 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3183
3184 /*
3185 * Construct the error code masks based on what caused this task switch.
3186 * See Intel Instruction reference for INT.
3187 */
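    /* Note: uExt supplies the EXT bit (bit 0) of the error codes used for any
       fault raised during the remainder of the task switch; it is set when the
       event was not triggered by a software interrupt instruction (INT n,
       INT3, INTO). */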
3188 uint16_t uExt;
3189 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3190 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3191 {
3192 uExt = 1;
3193 }
3194 else
3195 uExt = 0;
3196
3197 /*
3198 * Push any error code on to the new stack.
3199 */
3200 if (fFlags & IEM_XCPT_FLAGS_ERR)
3201 {
3202 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3203 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3204 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3205
3206 /* Check that there is sufficient space on the stack. */
3207 /** @todo Factor out segment limit checking for normal/expand down segments
3208 * into a separate function. */
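        /* For a normal (expand-up) stack segment, valid offsets run from 0 up to
           the limit; for an expand-down segment they run from limit+1 up to the
           segment's maximum offset (0xffff, or 0xffffffff when D/B is set). */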
3209 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3210 {
3211 if ( pCtx->esp - 1 > cbLimitSS
3212 || pCtx->esp < cbStackFrame)
3213 {
3214 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3215 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3216 cbStackFrame));
3217 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3218 }
3219 }
3220 else
3221 {
3222             if (   pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3223 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3224 {
3225 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3226 cbStackFrame));
3227 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3228 }
3229 }
3230
3231
3232 if (fIsNewTSS386)
3233 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3234 else
3235 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3236 if (rcStrict != VINF_SUCCESS)
3237 {
3238 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3239 VBOXSTRICTRC_VAL(rcStrict)));
3240 return rcStrict;
3241 }
3242 }
3243
3244 /* Check the new EIP against the new CS limit. */
3245 if (pCtx->eip > pCtx->cs.u32Limit)
3246 {
3247         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3248 pCtx->eip, pCtx->cs.u32Limit));
3249 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3250 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3251 }
3252
3253 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3254 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3255}
3256
3257
3258/**
3259 * Implements exceptions and interrupts for protected mode.
3260 *
3261 * @returns VBox strict status code.
3262 * @param pIemCpu The IEM per CPU instance data.
3263 * @param pCtx The CPU context.
3264 * @param cbInstr The number of bytes to offset rIP by in the return
3265 * address.
3266 * @param u8Vector The interrupt / exception vector number.
3267 * @param fFlags The flags.
3268 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3269 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3270 */
3271IEM_STATIC VBOXSTRICTRC
3272iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3273 PCPUMCTX pCtx,
3274 uint8_t cbInstr,
3275 uint8_t u8Vector,
3276 uint32_t fFlags,
3277 uint16_t uErr,
3278 uint64_t uCr2)
3279{
3280 /*
3281 * Read the IDT entry.
3282 */
3283 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3284 {
3285 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3286 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3287 }
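    /* Protected-mode IDT entries are 8 bytes each, so the gate for vector N
       lives at IDT base + 8*N. */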
3288 X86DESC Idte;
3289 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3290 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3291 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3292 return rcStrict;
3293 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3294 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3295 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3296
3297 /*
3298 * Check the descriptor type, DPL and such.
3299 * ASSUMES this is done in the same order as described for call-gate calls.
3300 */
3301 if (Idte.Gate.u1DescType)
3302 {
3303 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3304 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3305 }
3306 bool fTaskGate = false;
3307     uint8_t f32BitGate = true; /* not a bool: also used as a shift count for the stack frame size below. */
3308 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3309 switch (Idte.Gate.u4Type)
3310 {
3311 case X86_SEL_TYPE_SYS_UNDEFINED:
3312 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3313 case X86_SEL_TYPE_SYS_LDT:
3314 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3315 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3316 case X86_SEL_TYPE_SYS_UNDEFINED2:
3317 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3318 case X86_SEL_TYPE_SYS_UNDEFINED3:
3319 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3320 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3321 case X86_SEL_TYPE_SYS_UNDEFINED4:
3322 {
3323 /** @todo check what actually happens when the type is wrong...
3324 * esp. call gates. */
3325 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3326 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3327 }
3328
3329 case X86_SEL_TYPE_SYS_286_INT_GATE:
3330             f32BitGate = false; /* fall thru */
3331 case X86_SEL_TYPE_SYS_386_INT_GATE:
3332 fEflToClear |= X86_EFL_IF;
3333 break;
3334
3335 case X86_SEL_TYPE_SYS_TASK_GATE:
3336 fTaskGate = true;
3337#ifndef IEM_IMPLEMENTS_TASKSWITCH
3338 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3339#endif
3340 break;
3341
3342 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3343             f32BitGate = false; /* fall thru */
3344 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3345 break;
3346
3347 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3348 }
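    /* Note: only interrupt gates clear IF when the handler is entered; trap
       gates leave it alone, which is why X86_EFL_IF is added to fEflToClear
       in the interrupt gate cases above. */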
3349
3350 /* Check DPL against CPL if applicable. */
3351 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3352 {
3353 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3354 {
3355 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3356 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3357 }
3358 }
3359
3360 /* Is it there? */
3361 if (!Idte.Gate.u1Present)
3362 {
3363 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3364 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3365 }
3366
3367 /* Is it a task-gate? */
3368 if (fTaskGate)
3369 {
3370 /*
3371 * Construct the error code masks based on what caused this task switch.
3372 * See Intel Instruction reference for INT.
3373 */
3374 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3375 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3376 RTSEL SelTSS = Idte.Gate.u16Sel;
3377
3378 /*
3379 * Fetch the TSS descriptor in the GDT.
3380 */
3381 IEMSELDESC DescTSS;
3382 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3383 if (rcStrict != VINF_SUCCESS)
3384 {
3385 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3386 VBOXSTRICTRC_VAL(rcStrict)));
3387 return rcStrict;
3388 }
3389
3390 /* The TSS descriptor must be a system segment and be available (not busy). */
3391 if ( DescTSS.Legacy.Gen.u1DescType
3392 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3393 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3394 {
3395 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3396 u8Vector, SelTSS, DescTSS.Legacy.au64));
3397 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3398 }
3399
3400 /* The TSS must be present. */
3401 if (!DescTSS.Legacy.Gen.u1Present)
3402 {
3403 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3404 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3405 }
3406
3407 /* Do the actual task switch. */
3408 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3409 }
3410
3411 /* A null CS is bad. */
3412 RTSEL NewCS = Idte.Gate.u16Sel;
3413 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3414 {
3415 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3416 return iemRaiseGeneralProtectionFault0(pIemCpu);
3417 }
3418
3419 /* Fetch the descriptor for the new CS. */
3420 IEMSELDESC DescCS;
3421 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3422 if (rcStrict != VINF_SUCCESS)
3423 {
3424 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3425 return rcStrict;
3426 }
3427
3428 /* Must be a code segment. */
3429 if (!DescCS.Legacy.Gen.u1DescType)
3430 {
3431 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3432 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3433 }
3434 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3435 {
3436 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3437 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3438 }
3439
3440 /* Don't allow lowering the privilege level. */
3441 /** @todo Does the lowering of privileges apply to software interrupts
3442 * only? This has bearings on the more-privileged or
3443 * same-privilege stack behavior further down. A testcase would
3444 * be nice. */
3445 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3446 {
3447 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3448 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3449 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3450 }
3451
3452 /* Make sure the selector is present. */
3453 if (!DescCS.Legacy.Gen.u1Present)
3454 {
3455 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3456 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3457 }
3458
3459 /* Check the new EIP against the new CS limit. */
3460 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3461 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3462 ? Idte.Gate.u16OffsetLow
3463 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3464 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3465 if (uNewEip > cbLimitCS)
3466 {
3467 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3468 u8Vector, uNewEip, cbLimitCS, NewCS));
3469 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3470 }
3471
3472 /* Calc the flag image to push. */
3473 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3474 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3475 fEfl &= ~X86_EFL_RF;
3476 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3477 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3478
3479 /* From V8086 mode only go to CPL 0. */
3480 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3481 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3482 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3483 {
3484 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3485 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3486 }
3487
3488 /*
3489 * If the privilege level changes, we need to get a new stack from the TSS.
3490 * This in turns means validating the new SS and ESP...
3491 */
3492 if (uNewCpl != pIemCpu->uCpl)
3493 {
3494 RTSEL NewSS;
3495 uint32_t uNewEsp;
3496 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3497 if (rcStrict != VINF_SUCCESS)
3498 return rcStrict;
3499
3500 IEMSELDESC DescSS;
3501 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3502 if (rcStrict != VINF_SUCCESS)
3503 return rcStrict;
3504
3505 /* Check that there is sufficient space for the stack frame. */
3506 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3507 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3508 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3509 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
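        /* Frame sizes: SS, ESP, EFLAGS, CS, EIP (+ optional error code) make 5 or
           6 entries; interrupting V8086 code additionally saves GS, FS, DS and ES
           for 9 or 10 entries.  The '<< f32BitGate' doubles the 16-bit byte counts
           for 32-bit gates. */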
3510
3511 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3512 {
3513 if ( uNewEsp - 1 > cbLimitSS
3514 || uNewEsp < cbStackFrame)
3515 {
3516 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3517 u8Vector, NewSS, uNewEsp, cbStackFrame));
3518 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3519 }
3520 }
3521 else
3522 {
3523             if (   uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3524 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3525 {
3526 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3527 u8Vector, NewSS, uNewEsp, cbStackFrame));
3528 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3529 }
3530 }
3531
3532 /*
3533 * Start making changes.
3534 */
3535
3536 /* Create the stack frame. */
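        /* Layout, from the new stack top (lowest address) upwards:
           [error code,] return EIP, CS, EFLAGS, old ESP, old SS
           [, ES, DS, FS, GS when interrupting V8086 code]. */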
3537 RTPTRUNION uStackFrame;
3538 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3539 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3540 if (rcStrict != VINF_SUCCESS)
3541 return rcStrict;
3542 void * const pvStackFrame = uStackFrame.pv;
3543 if (f32BitGate)
3544 {
3545 if (fFlags & IEM_XCPT_FLAGS_ERR)
3546 *uStackFrame.pu32++ = uErr;
3547 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3548 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3549 uStackFrame.pu32[2] = fEfl;
3550 uStackFrame.pu32[3] = pCtx->esp;
3551 uStackFrame.pu32[4] = pCtx->ss.Sel;
3552 if (fEfl & X86_EFL_VM)
3553 {
3554 uStackFrame.pu32[1] = pCtx->cs.Sel;
3555 uStackFrame.pu32[5] = pCtx->es.Sel;
3556 uStackFrame.pu32[6] = pCtx->ds.Sel;
3557 uStackFrame.pu32[7] = pCtx->fs.Sel;
3558 uStackFrame.pu32[8] = pCtx->gs.Sel;
3559 }
3560 }
3561 else
3562 {
3563 if (fFlags & IEM_XCPT_FLAGS_ERR)
3564 *uStackFrame.pu16++ = uErr;
3565 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3566 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3567 uStackFrame.pu16[2] = fEfl;
3568 uStackFrame.pu16[3] = pCtx->sp;
3569 uStackFrame.pu16[4] = pCtx->ss.Sel;
3570 if (fEfl & X86_EFL_VM)
3571 {
3572 uStackFrame.pu16[1] = pCtx->cs.Sel;
3573 uStackFrame.pu16[5] = pCtx->es.Sel;
3574 uStackFrame.pu16[6] = pCtx->ds.Sel;
3575 uStackFrame.pu16[7] = pCtx->fs.Sel;
3576 uStackFrame.pu16[8] = pCtx->gs.Sel;
3577 }
3578 }
3579 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3580 if (rcStrict != VINF_SUCCESS)
3581 return rcStrict;
3582
3583 /* Mark the selectors 'accessed' (hope this is the correct time). */
3584         /** @todo testcase: exactly _when_ are the accessed bits set - before or
3585 * after pushing the stack frame? (Write protect the gdt + stack to
3586 * find out.) */
3587 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3588 {
3589 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3590 if (rcStrict != VINF_SUCCESS)
3591 return rcStrict;
3592 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3593 }
3594
3595 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3596 {
3597 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3598 if (rcStrict != VINF_SUCCESS)
3599 return rcStrict;
3600 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3601 }
3602
3603 /*
3604          * Start committing the register changes (joins with the DPL=CPL branch).
3605 */
3606 pCtx->ss.Sel = NewSS;
3607 pCtx->ss.ValidSel = NewSS;
3608 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3609 pCtx->ss.u32Limit = cbLimitSS;
3610 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3611 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3612 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3613 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3614 * SP is loaded).
3615 * Need to check the other combinations too:
3616 * - 16-bit TSS, 32-bit handler
3617 * - 32-bit TSS, 16-bit handler */
3618 if (!pCtx->ss.Attr.n.u1DefBig)
3619 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
3620 else
3621 pCtx->rsp = uNewEsp - cbStackFrame;
3622 pIemCpu->uCpl = uNewCpl;
3623
3624 if (fEfl & X86_EFL_VM)
3625 {
3626 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3627 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3628 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3629 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3630 }
3631 }
3632 /*
3633 * Same privilege, no stack change and smaller stack frame.
3634 */
3635 else
3636 {
3637 uint64_t uNewRsp;
3638 RTPTRUNION uStackFrame;
3639 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3640 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3641 if (rcStrict != VINF_SUCCESS)
3642 return rcStrict;
3643 void * const pvStackFrame = uStackFrame.pv;
3644
3645 if (f32BitGate)
3646 {
3647 if (fFlags & IEM_XCPT_FLAGS_ERR)
3648 *uStackFrame.pu32++ = uErr;
3649 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3650 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3651 uStackFrame.pu32[2] = fEfl;
3652 }
3653 else
3654 {
3655 if (fFlags & IEM_XCPT_FLAGS_ERR)
3656 *uStackFrame.pu16++ = uErr;
3657 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3658 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3659 uStackFrame.pu16[2] = fEfl;
3660 }
3661 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3662 if (rcStrict != VINF_SUCCESS)
3663 return rcStrict;
3664
3665 /* Mark the CS selector as 'accessed'. */
3666 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3667 {
3668 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3669 if (rcStrict != VINF_SUCCESS)
3670 return rcStrict;
3671 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3672 }
3673
3674 /*
3675 * Start committing the register changes (joins with the other branch).
3676 */
3677 pCtx->rsp = uNewRsp;
3678 }
3679
3680 /* ... register committing continues. */
3681 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3682 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3683 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3684 pCtx->cs.u32Limit = cbLimitCS;
3685 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3686 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3687
3688 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3689 fEfl &= ~fEflToClear;
3690 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3691
3692 if (fFlags & IEM_XCPT_FLAGS_CR2)
3693 pCtx->cr2 = uCr2;
3694
3695 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3696 iemRaiseXcptAdjustState(pCtx, u8Vector);
3697
3698 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3699}
3700
3701
3702/**
3703 * Implements exceptions and interrupts for long mode.
3704 *
3705 * @returns VBox strict status code.
3706 * @param pIemCpu The IEM per CPU instance data.
3707 * @param pCtx The CPU context.
3708 * @param cbInstr The number of bytes to offset rIP by in the return
3709 * address.
3710 * @param u8Vector The interrupt / exception vector number.
3711 * @param fFlags The flags.
3712 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3713 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3714 */
3715IEM_STATIC VBOXSTRICTRC
3716iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3717 PCPUMCTX pCtx,
3718 uint8_t cbInstr,
3719 uint8_t u8Vector,
3720 uint32_t fFlags,
3721 uint16_t uErr,
3722 uint64_t uCr2)
3723{
3724 /*
3725 * Read the IDT entry.
3726 */
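    /* Long-mode IDT entries are 16 bytes, hence the '<< 4' below and the two
       8-byte fetches that follow. */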
3727 uint16_t offIdt = (uint16_t)u8Vector << 4;
3728 if (pCtx->idtr.cbIdt < offIdt + 7)
3729 {
3730 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3731 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3732 }
3733 X86DESC64 Idte;
3734 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3735 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3736 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3737 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3738 return rcStrict;
3739 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3740 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3741 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3742
3743 /*
3744 * Check the descriptor type, DPL and such.
3745 * ASSUMES this is done in the same order as described for call-gate calls.
3746 */
3747 if (Idte.Gate.u1DescType)
3748 {
3749 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3750 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3751 }
3752 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3753 switch (Idte.Gate.u4Type)
3754 {
3755 case AMD64_SEL_TYPE_SYS_INT_GATE:
3756 fEflToClear |= X86_EFL_IF;
3757 break;
3758 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3759 break;
3760
3761 default:
3762 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3763 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3764 }
3765
3766 /* Check DPL against CPL if applicable. */
3767 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3768 {
3769 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3770 {
3771 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3772 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3773 }
3774 }
3775
3776 /* Is it there? */
3777 if (!Idte.Gate.u1Present)
3778 {
3779 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3780 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3781 }
3782
3783 /* A null CS is bad. */
3784 RTSEL NewCS = Idte.Gate.u16Sel;
3785 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3786 {
3787 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3788 return iemRaiseGeneralProtectionFault0(pIemCpu);
3789 }
3790
3791 /* Fetch the descriptor for the new CS. */
3792 IEMSELDESC DescCS;
3793 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3794 if (rcStrict != VINF_SUCCESS)
3795 {
3796 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3797 return rcStrict;
3798 }
3799
3800 /* Must be a 64-bit code segment. */
3801 if (!DescCS.Long.Gen.u1DescType)
3802 {
3803 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3804 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3805 }
3806 if ( !DescCS.Long.Gen.u1Long
3807 || DescCS.Long.Gen.u1DefBig
3808 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3809 {
3810 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3811 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3812 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3813 }
3814
3815 /* Don't allow lowering the privilege level. For non-conforming CS
3816 selectors, the CS.DPL sets the privilege level the trap/interrupt
3817 handler runs at. For conforming CS selectors, the CPL remains
3818 unchanged, but the CS.DPL must be <= CPL. */
3819 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3820 * when CPU in Ring-0. Result \#GP? */
3821 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3822 {
3823 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3824 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3825 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3826 }
3827
3828
3829 /* Make sure the selector is present. */
3830 if (!DescCS.Legacy.Gen.u1Present)
3831 {
3832 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3833 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3834 }
3835
3836 /* Check that the new RIP is canonical. */
3837 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3838 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3839 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3840 if (!IEM_IS_CANONICAL(uNewRip))
3841 {
3842 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3843 return iemRaiseGeneralProtectionFault0(pIemCpu);
3844 }
3845
3846 /*
3847 * If the privilege level changes or if the IST isn't zero, we need to get
3848 * a new stack from the TSS.
3849 */
3850 uint64_t uNewRsp;
3851 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3852 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3853 if ( uNewCpl != pIemCpu->uCpl
3854 || Idte.Gate.u3IST != 0)
3855 {
3856 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3857 if (rcStrict != VINF_SUCCESS)
3858 return rcStrict;
3859 }
3860 else
3861 uNewRsp = pCtx->rsp;
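    /* In 64-bit mode the CPU aligns the new stack pointer down to a 16-byte
       boundary before pushing the interrupt frame. */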
3862 uNewRsp &= ~(uint64_t)0xf;
3863
3864 /*
3865 * Calc the flag image to push.
3866 */
3867 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3868 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3869 fEfl &= ~X86_EFL_RF;
3870 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3871 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3872
3873 /*
3874 * Start making changes.
3875 */
3876
3877 /* Create the stack frame. */
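    /* In 64-bit mode SS:RSP is pushed unconditionally, so the frame is always
       RIP, CS, RFLAGS, RSP and SS (five quadwords), plus an optional error code. */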
3878 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3879 RTPTRUNION uStackFrame;
3880 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3881 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3882 if (rcStrict != VINF_SUCCESS)
3883 return rcStrict;
3884 void * const pvStackFrame = uStackFrame.pv;
3885
3886 if (fFlags & IEM_XCPT_FLAGS_ERR)
3887 *uStackFrame.pu64++ = uErr;
3888 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3889 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3890 uStackFrame.pu64[2] = fEfl;
3891 uStackFrame.pu64[3] = pCtx->rsp;
3892 uStackFrame.pu64[4] = pCtx->ss.Sel;
3893 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3894 if (rcStrict != VINF_SUCCESS)
3895 return rcStrict;
3896
3897     /* Mark the CS selector 'accessed' (hope this is the correct time). */
3898     /** @todo testcase: exactly _when_ are the accessed bits set - before or
3899 * after pushing the stack frame? (Write protect the gdt + stack to
3900 * find out.) */
3901 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3902 {
3903 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3904 if (rcStrict != VINF_SUCCESS)
3905 return rcStrict;
3906 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3907 }
3908
3909 /*
3910      * Start committing the register changes.
3911 */
3912 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3913 * hidden registers when interrupting 32-bit or 16-bit code! */
3914 if (uNewCpl != pIemCpu->uCpl)
3915 {
3916 pCtx->ss.Sel = 0 | uNewCpl;
3917 pCtx->ss.ValidSel = 0 | uNewCpl;
3918 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3919 pCtx->ss.u32Limit = UINT32_MAX;
3920 pCtx->ss.u64Base = 0;
3921 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3922 }
3923 pCtx->rsp = uNewRsp - cbStackFrame;
3924 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3925 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3926 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3927 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3928 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3929 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3930 pCtx->rip = uNewRip;
3931 pIemCpu->uCpl = uNewCpl;
3932
3933 fEfl &= ~fEflToClear;
3934 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3935
3936 if (fFlags & IEM_XCPT_FLAGS_CR2)
3937 pCtx->cr2 = uCr2;
3938
3939 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3940 iemRaiseXcptAdjustState(pCtx, u8Vector);
3941
3942 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3943}
3944
3945
3946/**
3947 * Implements exceptions and interrupts.
3948 *
3949  * All exceptions and interrupts go through this function!
3950 *
3951 * @returns VBox strict status code.
3952 * @param pIemCpu The IEM per CPU instance data.
3953 * @param cbInstr The number of bytes to offset rIP by in the return
3954 * address.
3955 * @param u8Vector The interrupt / exception vector number.
3956 * @param fFlags The flags.
3957 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3958 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3959 */
3960DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3961iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3962 uint8_t cbInstr,
3963 uint8_t u8Vector,
3964 uint32_t fFlags,
3965 uint16_t uErr,
3966 uint64_t uCr2)
3967{
3968 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3969#ifdef IN_RING0
3970 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3971 AssertRCReturn(rc, rc);
3972#endif
3973
3974 /*
3975 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3976 */
3977 if ( pCtx->eflags.Bits.u1VM
3978 && pCtx->eflags.Bits.u2IOPL != 3
3979 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3980 && (pCtx->cr0 & X86_CR0_PE) )
3981 {
3982 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3983 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3984 u8Vector = X86_XCPT_GP;
3985 uErr = 0;
3986 }
3987#ifdef DBGFTRACE_ENABLED
3988 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3989 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3990 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3991#endif
3992
3993 /*
3994 * Do recursion accounting.
3995 */
3996 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3997 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3998 if (pIemCpu->cXcptRecursions == 0)
3999 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4000 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
4001 else
4002 {
4003 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4004 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
4005
4006         /** @todo double and triple faults. */
4007 if (pIemCpu->cXcptRecursions >= 3)
4008 {
4009#ifdef DEBUG_bird
4010 AssertFailed();
4011#endif
4012 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4013 }
4014
4015 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4016 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4017 {
4018 ....
4019 } */
4020 }
4021 pIemCpu->cXcptRecursions++;
4022 pIemCpu->uCurXcpt = u8Vector;
4023 pIemCpu->fCurXcpt = fFlags;
4024
4025 /*
4026 * Extensive logging.
4027 */
4028#if defined(LOG_ENABLED) && defined(IN_RING3)
4029 if (LogIs3Enabled())
4030 {
4031 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4032 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4033 char szRegs[4096];
4034 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4035 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4036 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4037 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4038 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4039 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4040 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4041 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4042 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4043 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4044 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4045 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4046 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4047 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4048 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4049 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4050 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4051 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4052 " efer=%016VR{efer}\n"
4053 " pat=%016VR{pat}\n"
4054 " sf_mask=%016VR{sf_mask}\n"
4055 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4056 " lstar=%016VR{lstar}\n"
4057 " star=%016VR{star} cstar=%016VR{cstar}\n"
4058 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4059 );
4060
4061 char szInstr[256];
4062 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4063 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4064 szInstr, sizeof(szInstr), NULL);
4065 Log3(("%s%s\n", szRegs, szInstr));
4066 }
4067#endif /* LOG_ENABLED */
4068
4069 /*
4070 * Call the mode specific worker function.
4071 */
4072 VBOXSTRICTRC rcStrict;
4073 if (!(pCtx->cr0 & X86_CR0_PE))
4074 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4075 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4076 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4077 else
4078 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4079
4080 /*
4081 * Unwind.
4082 */
4083 pIemCpu->cXcptRecursions--;
4084 pIemCpu->uCurXcpt = uPrevXcpt;
4085 pIemCpu->fCurXcpt = fPrevXcpt;
4086 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4087 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4088 return rcStrict;
4089}
4090
4091
4092/** \#DE - 00. */
4093DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4094{
4095 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4096}
4097
4098
4099/** \#DB - 01.
4100  * @note This automatically clears DR7.GD. */
4101DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4102{
4103 /** @todo set/clear RF. */
4104 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4105 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4106}
4107
4108
4109/** \#UD - 06. */
4110DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4111{
4112 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4113}
4114
4115
4116/** \#NM - 07. */
4117DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4118{
4119 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4120}
4121
4122
4123/** \#TS(err) - 0a. */
4124DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4125{
4126 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4127}
4128
4129
4130/** \#TS(tr) - 0a. */
4131DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4132{
4133 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4134 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4135}
4136
4137
4138/** \#TS(0) - 0a. */
4139DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4140{
4141 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4142 0, 0);
4143}
4144
4145
4146 /** \#TS(sel) - 0a. */
4147DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4148{
4149 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4150 uSel & X86_SEL_MASK_OFF_RPL, 0);
4151}
4152
4153
4154/** \#NP(err) - 0b. */
4155DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4156{
4157 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4158}
4159
4160
4161/** \#NP(seg) - 0b. */
4162DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4163{
4164 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4165 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4166}
4167
4168
4169/** \#NP(sel) - 0b. */
4170DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4171{
4172 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4173 uSel & ~X86_SEL_RPL, 0);
4174}
4175
4176
4177/** \#SS(seg) - 0c. */
4178DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4179{
4180 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4181 uSel & ~X86_SEL_RPL, 0);
4182}
4183
4184
4185/** \#SS(err) - 0c. */
4186DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4187{
4188 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4189}
4190
4191
4192/** \#GP(n) - 0d. */
4193DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4194{
4195 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4196}
4197
4198
4199/** \#GP(0) - 0d. */
4200DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4201{
4202 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4203}
4204
4205
4206/** \#GP(sel) - 0d. */
4207DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4208{
4209 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4210 Sel & ~X86_SEL_RPL, 0);
4211}
4212
4213
4214/** \#GP(0) - 0d. */
4215DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4216{
4217 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4218}
4219
4220
4221/** \#GP(sel) - 0d. */
4222DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4223{
4224 NOREF(iSegReg); NOREF(fAccess);
4225 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4226 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4227}
4228
4229
4230/** \#GP(sel) - 0d. */
4231DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4232{
4233 NOREF(Sel);
4234 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4235}
4236
4237
4238/** \#GP(sel) - 0d. */
4239DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4240{
4241 NOREF(iSegReg); NOREF(fAccess);
4242 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4243}
4244
4245
4246/** \#PF(n) - 0e. */
4247DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4248{
4249 uint16_t uErr;
4250 switch (rc)
4251 {
4252 case VERR_PAGE_NOT_PRESENT:
4253 case VERR_PAGE_TABLE_NOT_PRESENT:
4254 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4255 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4256 uErr = 0;
4257 break;
4258
4259 default:
4260 AssertMsgFailed(("%Rrc\n", rc));
4261 case VERR_ACCESS_DENIED:
4262 uErr = X86_TRAP_PF_P;
4263 break;
4264
4265 /** @todo reserved */
4266 }
4267
4268 if (pIemCpu->uCpl == 3)
4269 uErr |= X86_TRAP_PF_US;
4270
4271 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4272 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4273 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4274 uErr |= X86_TRAP_PF_ID;
4275
4276 #if 0 /* This is so much nonsense, really. Why was it done like that? */
4277 /* Note! RW access callers reporting a WRITE protection fault, will clear
4278 the READ flag before calling. So, read-modify-write accesses (RW)
4279 can safely be reported as READ faults. */
4280 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4281 uErr |= X86_TRAP_PF_RW;
4282#else
4283 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4284 {
4285 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4286 uErr |= X86_TRAP_PF_RW;
4287 }
4288#endif
4289
4290 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4291 uErr, GCPtrWhere);
4292}
4293
4294
4295/** \#MF(0) - 10. */
4296DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4297{
4298 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4299}
4300
4301
4302/** \#AC(0) - 11. */
4303DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4304{
4305 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4306}
4307
4308
4309/**
4310 * Macro for calling iemCImplRaiseDivideError().
4311 *
4312 * This enables us to add/remove arguments and force different levels of
4313 * inlining as we wish.
4314 *
4315 * @return Strict VBox status code.
4316 */
4317#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4318IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4319{
4320 NOREF(cbInstr);
4321 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4322}
4323
4324
4325/**
4326 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4327 *
4328 * This enables us to add/remove arguments and force different levels of
4329 * inlining as we wish.
4330 *
4331 * @return Strict VBox status code.
4332 */
4333#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4334IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4335{
4336 NOREF(cbInstr);
4337 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4338}
4339
4340
4341/**
4342 * Macro for calling iemCImplRaiseInvalidOpcode().
4343 *
4344 * This enables us to add/remove arguments and force different levels of
4345 * inlining as we wish.
4346 *
4347 * @return Strict VBox status code.
4348 */
4349#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4350IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4351{
4352 NOREF(cbInstr);
4353 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4354}
4355
4356
4357/** @} */
4358
4359
4360/*
4361 *
4362  * Helper routines.
4363  * Helper routines.
4364  * Helper routines.
4365 *
4366 */
4367
4368/**
4369 * Recalculates the effective operand size.
4370 *
4371 * @param pIemCpu The IEM state.
4372 */
4373IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4374{
4375 switch (pIemCpu->enmCpuMode)
4376 {
4377 case IEMMODE_16BIT:
4378 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4379 break;
4380 case IEMMODE_32BIT:
4381 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4382 break;
4383 case IEMMODE_64BIT:
4384 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4385 {
4386 case 0:
4387 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4388 break;
4389 case IEM_OP_PRF_SIZE_OP:
4390 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4391 break;
4392 case IEM_OP_PRF_SIZE_REX_W:
4393 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4394 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4395 break;
4396 }
4397 break;
4398 default:
4399 AssertFailed();
4400 }
4401}
4402
4403
4404/**
4405 * Sets the default operand size to 64-bit and recalculates the effective
4406 * operand size.
4407 *
4408 * @param pIemCpu The IEM state.
4409 */
4410IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4411{
4412 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4413 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4414 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4415 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4416 else
4417 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4418}
4419
4420
4421/*
4422 *
4423 * Common opcode decoders.
4424 * Common opcode decoders.
4425 * Common opcode decoders.
4426 *
4427 */
4428//#include <iprt/mem.h>
4429
4430/**
4431 * Used to add extra details about a stub case.
4432 * @param pIemCpu The IEM per CPU state.
4433 */
4434IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4435{
4436#if defined(LOG_ENABLED) && defined(IN_RING3)
4437 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4438 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4439 char szRegs[4096];
4440 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4441 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4442 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4443 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4444 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4445 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4446 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4447 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4448 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4449 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4450 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4451 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4452 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4453 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4454 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4455 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4456 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4457 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4458 " efer=%016VR{efer}\n"
4459 " pat=%016VR{pat}\n"
4460 " sf_mask=%016VR{sf_mask}\n"
4461 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4462 " lstar=%016VR{lstar}\n"
4463 " star=%016VR{star} cstar=%016VR{cstar}\n"
4464 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4465 );
4466
4467 char szInstr[256];
4468 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4469 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4470 szInstr, sizeof(szInstr), NULL);
4471
4472 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4473#else
4474     RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4475#endif
4476}
4477
4478/**
4479 * Complains about a stub.
4480 *
4481  * Two versions of this macro are provided: one for daily use and one for use
4482  * when working on IEM.
4483 */
4484#if 0
4485# define IEMOP_BITCH_ABOUT_STUB() \
4486 do { \
4487 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4488 iemOpStubMsg2(pIemCpu); \
4489 RTAssertPanic(); \
4490 } while (0)
4491#else
4492# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4493#endif
4494
4495/** Stubs an opcode. */
4496#define FNIEMOP_STUB(a_Name) \
4497 FNIEMOP_DEF(a_Name) \
4498 { \
4499 IEMOP_BITCH_ABOUT_STUB(); \
4500 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4501 } \
4502 typedef int ignore_semicolon
4503
4504/** Stubs an opcode. */
4505#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4506 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4507 { \
4508 IEMOP_BITCH_ABOUT_STUB(); \
4509 NOREF(a_Name0); \
4510 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4511 } \
4512 typedef int ignore_semicolon
4513
4514/** Stubs an opcode which currently should raise \#UD. */
4515#define FNIEMOP_UD_STUB(a_Name) \
4516 FNIEMOP_DEF(a_Name) \
4517 { \
4518 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4519 return IEMOP_RAISE_INVALID_OPCODE(); \
4520 } \
4521 typedef int ignore_semicolon
4522
4523/** Stubs an opcode which currently should raise \#UD. */
4524#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4525 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4526 { \
4527 NOREF(a_Name0); \
4528 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4529 return IEMOP_RAISE_INVALID_OPCODE(); \
4530 } \
4531 typedef int ignore_semicolon
4532
4533
4534
4535/** @name Register Access.
4536 * @{
4537 */
4538
4539/**
4540 * Gets a reference (pointer) to the specified hidden segment register.
4541 *
4542 * @returns Hidden register reference.
4543 * @param pIemCpu The per CPU data.
4544 * @param iSegReg The segment register.
4545 */
4546IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4547{
4548 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4549 PCPUMSELREG pSReg;
4550 switch (iSegReg)
4551 {
4552 case X86_SREG_ES: pSReg = &pCtx->es; break;
4553 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4554 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4555 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4556 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4557 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4558 default:
4559 AssertFailedReturn(NULL);
4560 }
4561#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4562 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4563 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4564#else
4565 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4566#endif
4567 return pSReg;
4568}
4569
4570
4571/**
4572 * Gets a reference (pointer) to the specified segment register (the selector
4573 * value).
4574 *
4575 * @returns Pointer to the selector variable.
4576 * @param pIemCpu The per CPU data.
4577 * @param iSegReg The segment register.
4578 */
4579IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4580{
4581 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4582 switch (iSegReg)
4583 {
4584 case X86_SREG_ES: return &pCtx->es.Sel;
4585 case X86_SREG_CS: return &pCtx->cs.Sel;
4586 case X86_SREG_SS: return &pCtx->ss.Sel;
4587 case X86_SREG_DS: return &pCtx->ds.Sel;
4588 case X86_SREG_FS: return &pCtx->fs.Sel;
4589 case X86_SREG_GS: return &pCtx->gs.Sel;
4590 }
4591 AssertFailedReturn(NULL);
4592}
4593
4594
4595/**
4596 * Fetches the selector value of a segment register.
4597 *
4598 * @returns The selector value.
4599 * @param pIemCpu The per CPU data.
4600 * @param iSegReg The segment register.
4601 */
4602IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4603{
4604 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4605 switch (iSegReg)
4606 {
4607 case X86_SREG_ES: return pCtx->es.Sel;
4608 case X86_SREG_CS: return pCtx->cs.Sel;
4609 case X86_SREG_SS: return pCtx->ss.Sel;
4610 case X86_SREG_DS: return pCtx->ds.Sel;
4611 case X86_SREG_FS: return pCtx->fs.Sel;
4612 case X86_SREG_GS: return pCtx->gs.Sel;
4613 }
4614 AssertFailedReturn(0xffff);
4615}
4616
4617
4618/**
4619 * Gets a reference (pointer) to the specified general register.
4620 *
4621 * @returns Register reference.
4622 * @param pIemCpu The per CPU data.
4623 * @param iReg The general register.
4624 */
4625IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4626{
4627 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4628 switch (iReg)
4629 {
4630 case X86_GREG_xAX: return &pCtx->rax;
4631 case X86_GREG_xCX: return &pCtx->rcx;
4632 case X86_GREG_xDX: return &pCtx->rdx;
4633 case X86_GREG_xBX: return &pCtx->rbx;
4634 case X86_GREG_xSP: return &pCtx->rsp;
4635 case X86_GREG_xBP: return &pCtx->rbp;
4636 case X86_GREG_xSI: return &pCtx->rsi;
4637 case X86_GREG_xDI: return &pCtx->rdi;
4638 case X86_GREG_x8: return &pCtx->r8;
4639 case X86_GREG_x9: return &pCtx->r9;
4640 case X86_GREG_x10: return &pCtx->r10;
4641 case X86_GREG_x11: return &pCtx->r11;
4642 case X86_GREG_x12: return &pCtx->r12;
4643 case X86_GREG_x13: return &pCtx->r13;
4644 case X86_GREG_x14: return &pCtx->r14;
4645 case X86_GREG_x15: return &pCtx->r15;
4646 }
4647 AssertFailedReturn(NULL);
4648}
4649
4650
4651/**
4652 * Gets a reference (pointer) to the specified 8-bit general register.
4653 *
4654 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4655 *
4656 * @returns Register reference.
4657 * @param pIemCpu The per CPU data.
4658 * @param iReg The register.
4659 */
4660IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4661{
4662 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4663 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4664
4665 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4666 if (iReg >= 4)
4667 pu8Reg++;
4668 return pu8Reg;
4669}
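
/*
 * Illustrative sketch (not from the original source): without a REX prefix the
 * byte-register encodings 4-7 select AH/CH/DH/BH, i.e. the second byte of
 * RAX/RCX/RDX/RBX, which is what the "& 3" and "pu8Reg++" above implement; with
 * any REX prefix the same encodings select SPL/BPL/SIL/DIL instead.  The sketch
 * assumes the little-endian register layout the surrounding code relies on and
 * would only make sense inside the helper above.
 */
#if 0 /* example only */
    /* Encoding 5 without REX is CH: (5 & 3) -> X86_GREG_xCX, then +1 byte. */
    uint8_t *pbCh  = (uint8_t *)iemGRegRef(pIemCpu, 5 & 3) + 1;
    /* Encoding 5 with a REX prefix is BPL, the low byte of rBP. */
    uint8_t *pbBpl = (uint8_t *)iemGRegRef(pIemCpu, 5);
#endif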
4670
4671
4672/**
4673 * Fetches the value of an 8-bit general register.
4674 *
4675 * @returns The register value.
4676 * @param pIemCpu The per CPU data.
4677 * @param iReg The register.
4678 */
4679IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4680{
4681 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4682 return *pbSrc;
4683}
4684
4685
4686/**
4687 * Fetches the value of a 16-bit general register.
4688 *
4689 * @returns The register value.
4690 * @param pIemCpu The per CPU data.
4691 * @param iReg The register.
4692 */
4693IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4694{
4695 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4696}
4697
4698
4699/**
4700 * Fetches the value of a 32-bit general register.
4701 *
4702 * @returns The register value.
4703 * @param pIemCpu The per CPU data.
4704 * @param iReg The register.
4705 */
4706IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4707{
4708 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4709}
4710
4711
4712/**
4713 * Fetches the value of a 64-bit general register.
4714 *
4715 * @returns The register value.
4716 * @param pIemCpu The per CPU data.
4717 * @param iReg The register.
4718 */
4719IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4720{
4721 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4722}
4723
4724
4725/**
4726 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4727 *
4728 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4729 * segment limit.
4730 *
4731 * @param pIemCpu The per CPU data.
4732 * @param offNextInstr The offset of the next instruction.
4733 */
4734IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4735{
4736 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4737 switch (pIemCpu->enmEffOpSize)
4738 {
4739 case IEMMODE_16BIT:
4740 {
4741 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4742 if ( uNewIp > pCtx->cs.u32Limit
4743 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4744 return iemRaiseGeneralProtectionFault0(pIemCpu);
4745 pCtx->rip = uNewIp;
4746 break;
4747 }
4748
4749 case IEMMODE_32BIT:
4750 {
4751 Assert(pCtx->rip <= UINT32_MAX);
4752 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4753
4754 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4755 if (uNewEip > pCtx->cs.u32Limit)
4756 return iemRaiseGeneralProtectionFault0(pIemCpu);
4757 pCtx->rip = uNewEip;
4758 break;
4759 }
4760
4761 case IEMMODE_64BIT:
4762 {
4763 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4764
4765 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4766 if (!IEM_IS_CANONICAL(uNewRip))
4767 return iemRaiseGeneralProtectionFault0(pIemCpu);
4768 pCtx->rip = uNewRip;
4769 break;
4770 }
4771
4772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4773 }
4774
4775 pCtx->eflags.Bits.u1RF = 0;
4776 return VINF_SUCCESS;
4777}
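
/*
 * Worked example (illustrative, not from the original source): by the time this
 * helper runs the decoder has already consumed the instruction bytes, so
 * offOpcode is the instruction length and the target is IP + length + rel8.
 * The numbers below are made up purely for illustration.
 */
#if 0 /* example only */
    /* 16-bit code, IP=0x1000, EB FE = jmp short -2: 0x1000 + 2 + (-2) = 0x1000 (jump to self). */
    uint16_t const uTarget = (uint16_t)(UINT16_C(0x1000) + 2 + (int8_t)0xfe);
    Assert(uTarget == UINT16_C(0x1000));
#endif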
4778
4779
4780/**
4781 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4782 *
4783 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4784 * segment limit.
4785 *
4786 * @returns Strict VBox status code.
4787 * @param pIemCpu The per CPU data.
4788 * @param offNextInstr The offset of the next instruction.
4789 */
4790IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4791{
4792 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4793 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4794
4795 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4796 if ( uNewIp > pCtx->cs.u32Limit
4797 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4798 return iemRaiseGeneralProtectionFault0(pIemCpu);
4799 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4800 pCtx->rip = uNewIp;
4801 pCtx->eflags.Bits.u1RF = 0;
4802
4803 return VINF_SUCCESS;
4804}
4805
4806
4807/**
4808 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4809 *
4810 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4811 * segment limit.
4812 *
4813 * @returns Strict VBox status code.
4814 * @param pIemCpu The per CPU data.
4815 * @param offNextInstr The offset of the next instruction.
4816 */
4817IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4818{
4819 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4820 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4821
4822 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4823 {
4824 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4825
4826 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4827 if (uNewEip > pCtx->cs.u32Limit)
4828 return iemRaiseGeneralProtectionFault0(pIemCpu);
4829 pCtx->rip = uNewEip;
4830 }
4831 else
4832 {
4833 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4834
4835 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4836 if (!IEM_IS_CANONICAL(uNewRip))
4837 return iemRaiseGeneralProtectionFault0(pIemCpu);
4838 pCtx->rip = uNewRip;
4839 }
4840 pCtx->eflags.Bits.u1RF = 0;
4841 return VINF_SUCCESS;
4842}
4843
4844
4845/**
4846 * Performs a near jump to the specified address.
4847 *
4848 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4849 * segment limit.
4850 *
4851 * @param pIemCpu The per CPU data.
4852 * @param uNewRip The new RIP value.
4853 */
4854IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4855{
4856 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4857 switch (pIemCpu->enmEffOpSize)
4858 {
4859 case IEMMODE_16BIT:
4860 {
4861 Assert(uNewRip <= UINT16_MAX);
4862 if ( uNewRip > pCtx->cs.u32Limit
4863 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4864 return iemRaiseGeneralProtectionFault0(pIemCpu);
4865 /** @todo Test 16-bit jump in 64-bit mode. */
4866 pCtx->rip = uNewRip;
4867 break;
4868 }
4869
4870 case IEMMODE_32BIT:
4871 {
4872 Assert(uNewRip <= UINT32_MAX);
4873 Assert(pCtx->rip <= UINT32_MAX);
4874 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4875
4876 if (uNewRip > pCtx->cs.u32Limit)
4877 return iemRaiseGeneralProtectionFault0(pIemCpu);
4878 pCtx->rip = uNewRip;
4879 break;
4880 }
4881
4882 case IEMMODE_64BIT:
4883 {
4884 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4885
4886 if (!IEM_IS_CANONICAL(uNewRip))
4887 return iemRaiseGeneralProtectionFault0(pIemCpu);
4888 pCtx->rip = uNewRip;
4889 break;
4890 }
4891
4892 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4893 }
4894
4895 pCtx->eflags.Bits.u1RF = 0;
4896 return VINF_SUCCESS;
4897}
4898
4899
4900/**
4901 * Gets the address of the top of the stack.
4902 *
4903 * @param pIemCpu The per CPU data.
4904 * @param pCtx The CPU context which SP/ESP/RSP should be
4905 * read.
4906 */
4907DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4908{
4909 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4910 return pCtx->rsp;
4911 if (pCtx->ss.Attr.n.u1DefBig)
4912 return pCtx->esp;
4913 return pCtx->sp;
4914}
4915
4916
4917/**
4918 * Updates the RIP/EIP/IP to point to the next instruction.
4919 *
4920 * This function leaves the EFLAGS.RF flag alone.
4921 *
4922 * @param pIemCpu The per CPU data.
4923 * @param cbInstr The number of bytes to add.
4924 */
4925IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4926{
4927 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4928 switch (pIemCpu->enmCpuMode)
4929 {
4930 case IEMMODE_16BIT:
4931 Assert(pCtx->rip <= UINT16_MAX);
4932 pCtx->eip += cbInstr;
4933 pCtx->eip &= UINT32_C(0xffff);
4934 break;
4935
4936 case IEMMODE_32BIT:
4937 pCtx->eip += cbInstr;
4938 Assert(pCtx->rip <= UINT32_MAX);
4939 break;
4940
4941 case IEMMODE_64BIT:
4942 pCtx->rip += cbInstr;
4943 break;
4944 default: AssertFailed();
4945 }
4946}
4947
4948
4949#if 0
4950/**
4951 * Updates the RIP/EIP/IP to point to the next instruction.
4952 *
4953 * @param pIemCpu The per CPU data.
4954 */
4955IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4956{
4957 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4958}
4959#endif
4960
4961
4962
4963/**
4964 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4965 *
4966 * @param pIemCpu The per CPU data.
4967 * @param cbInstr The number of bytes to add.
4968 */
4969IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4970{
4971 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4972
4973 pCtx->eflags.Bits.u1RF = 0;
4974
4975 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4976 switch (pIemCpu->enmCpuMode)
4977 {
4978 /** @todo investigate if EIP or RIP is really incremented. */
4979 case IEMMODE_16BIT:
4980 case IEMMODE_32BIT:
4981 pCtx->eip += cbInstr;
4982 Assert(pCtx->rip <= UINT32_MAX);
4983 break;
4984
4985 case IEMMODE_64BIT:
4986 pCtx->rip += cbInstr;
4987 break;
4988 default: AssertFailed();
4989 }
4990}
4991
4992
4993/**
4994 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4995 *
4996 * @param pIemCpu The per CPU data.
4997 */
4998IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4999{
5000 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
5001}
5002
5003
5004/**
5005 * Adds to the stack pointer.
5006 *
5007 * @param pIemCpu The per CPU data.
5008 * @param pCtx The CPU context which SP/ESP/RSP should be
5009 * updated.
5010 * @param cbToAdd The number of bytes to add.
5011 */
5012DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
5013{
5014 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5015 pCtx->rsp += cbToAdd;
5016 else if (pCtx->ss.Attr.n.u1DefBig)
5017 pCtx->esp += cbToAdd;
5018 else
5019 pCtx->sp += cbToAdd;
5020}
5021
5022
5023/**
5024 * Subtracts from the stack pointer.
5025 *
5026 * @param pIemCpu The per CPU data.
5027 * @param pCtx The CPU context which SP/ESP/RSP should be
5028 * updated.
5029 * @param cbToSub The number of bytes to subtract.
5030 */
5031DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
5032{
5033 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5034 pCtx->rsp -= cbToSub;
5035 else if (pCtx->ss.Attr.n.u1DefBig)
5036 pCtx->esp -= cbToSub;
5037 else
5038 pCtx->sp -= cbToSub;
5039}
5040
5041
5042/**
5043 * Adds to the temporary stack pointer.
5044 *
5045 * @param pIemCpu The per CPU data.
5046 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5047 * @param cbToAdd The number of bytes to add.
5048 * @param pCtx Where to get the current stack mode.
5049 */
5050DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
5051{
5052 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5053 pTmpRsp->u += cbToAdd;
5054 else if (pCtx->ss.Attr.n.u1DefBig)
5055 pTmpRsp->DWords.dw0 += cbToAdd;
5056 else
5057 pTmpRsp->Words.w0 += cbToAdd;
5058}
5059
5060
5061/**
5062 * Subtracts from the temporary stack pointer.
5063 *
5064 * @param pIemCpu The per CPU data.
5065 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5066 * @param cbToSub The number of bytes to subtract.
5067 * @param pCtx Where to get the current stack mode.
5068 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5069 * expecting that.
5070 */
5071DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5072{
5073 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5074 pTmpRsp->u -= cbToSub;
5075 else if (pCtx->ss.Attr.n.u1DefBig)
5076 pTmpRsp->DWords.dw0 -= cbToSub;
5077 else
5078 pTmpRsp->Words.w0 -= cbToSub;
5079}
5080
5081
5082/**
5083 * Calculates the effective stack address for a push of the specified size as
5084 * well as the new RSP value (upper bits may be masked).
5085 *
5086 * @returns Effective stack address for the push.
5087 * @param pIemCpu The IEM per CPU data.
5088 * @param pCtx Where to get the current stack mode.
5089 * @param cbItem The size of the stack item to push.
5090 * @param puNewRsp Where to return the new RSP value.
5091 */
5092DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5093{
5094 RTUINT64U uTmpRsp;
5095 RTGCPTR GCPtrTop;
5096 uTmpRsp.u = pCtx->rsp;
5097
5098 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5099 GCPtrTop = uTmpRsp.u -= cbItem;
5100 else if (pCtx->ss.Attr.n.u1DefBig)
5101 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5102 else
5103 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5104 *puNewRsp = uTmpRsp.u;
5105 return GCPtrTop;
5106}
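
/*
 * Illustrative sketch (not from the original source): with a 16-bit stack
 * (SS.D clear, not in 64-bit mode) only the low word of RSP takes part in the
 * push, so SP wraps at zero while the upper bits of RSP are left untouched.
 * The values below are made up for illustration.
 */
#if 0 /* example only */
    RTUINT64U uRsp;
    uRsp.u = UINT64_C(0x0000000000010000);      /* SP=0, an upper bit set for show. */
    RTGCPTR GCPtrTop = uRsp.Words.w0 -= 2;      /* push a word: SP becomes 0xfffe. */
    Assert(GCPtrTop == 0xfffe && uRsp.u == UINT64_C(0x000000000001fffe));
#endif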
5107
5108
5109/**
5110 * Gets the current stack pointer and calculates the value after a pop of the
5111 * specified size.
5112 *
5113 * @returns Current stack pointer.
5114 * @param pIemCpu The per CPU data.
5115 * @param pCtx Where to get the current stack mode.
5116 * @param cbItem The size of the stack item to pop.
5117 * @param puNewRsp Where to return the new RSP value.
5118 */
5119DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5120{
5121 RTUINT64U uTmpRsp;
5122 RTGCPTR GCPtrTop;
5123 uTmpRsp.u = pCtx->rsp;
5124
5125 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5126 {
5127 GCPtrTop = uTmpRsp.u;
5128 uTmpRsp.u += cbItem;
5129 }
5130 else if (pCtx->ss.Attr.n.u1DefBig)
5131 {
5132 GCPtrTop = uTmpRsp.DWords.dw0;
5133 uTmpRsp.DWords.dw0 += cbItem;
5134 }
5135 else
5136 {
5137 GCPtrTop = uTmpRsp.Words.w0;
5138 uTmpRsp.Words.w0 += cbItem;
5139 }
5140 *puNewRsp = uTmpRsp.u;
5141 return GCPtrTop;
5142}
5143
5144
5145/**
5146 * Calculates the effective stack address for a push of the specified size as
5147 * well as the new temporary RSP value (upper bits may be masked).
5148 *
5149 * @returns Effective stack address for the push.
5150 * @param pIemCpu The per CPU data.
5151 * @param pCtx Where to get the current stack mode.
5152 * @param pTmpRsp The temporary stack pointer. This is updated.
5153 * @param cbItem The size of the stack item to push.
5154 */
5155DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5156{
5157 RTGCPTR GCPtrTop;
5158
5159 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5160 GCPtrTop = pTmpRsp->u -= cbItem;
5161 else if (pCtx->ss.Attr.n.u1DefBig)
5162 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5163 else
5164 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5165 return GCPtrTop;
5166}
5167
5168
5169/**
5170 * Gets the effective stack address for a pop of the specified size and
5171 * calculates and updates the temporary RSP.
5172 *
5173 * @returns Current stack pointer.
5174 * @param pIemCpu The per CPU data.
5175 * @param pCtx Where to get the current stack mode.
5176 * @param pTmpRsp The temporary stack pointer. This is updated.
5177 * @param cbItem The size of the stack item to pop.
5178 */
5179DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5180{
5181 RTGCPTR GCPtrTop;
5182 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5183 {
5184 GCPtrTop = pTmpRsp->u;
5185 pTmpRsp->u += cbItem;
5186 }
5187 else if (pCtx->ss.Attr.n.u1DefBig)
5188 {
5189 GCPtrTop = pTmpRsp->DWords.dw0;
5190 pTmpRsp->DWords.dw0 += cbItem;
5191 }
5192 else
5193 {
5194 GCPtrTop = pTmpRsp->Words.w0;
5195 pTmpRsp->Words.w0 += cbItem;
5196 }
5197 return GCPtrTop;
5198}
5199
5200/** @} */
5201
5202
5203/** @name FPU access and helpers.
5204 *
5205 * @{
5206 */
5207
5208
5209/**
5210 * Hook for preparing to use the host FPU.
5211 *
5212 * This is necessary in ring-0 and raw-mode context.
5213 *
5214 * @param pIemCpu The IEM per CPU data.
5215 */
5216DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5217{
5218#ifdef IN_RING3
5219 NOREF(pIemCpu);
5220#else
5221/** @todo RZ: FIXME */
5222//# error "Implement me"
5223#endif
5224}
5225
5226
5227/**
5228 * Hook for preparing to use the host FPU for SSE.
5229 *
5230 * This is necessary in ring-0 and raw-mode context.
5231 *
5232 * @param pIemCpu The IEM per CPU data.
5233 */
5234DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5235{
5236 iemFpuPrepareUsage(pIemCpu);
5237}
5238
5239
5240/**
5241 * Stores a QNaN value into an FPU register.
5242 *
5243 * @param pReg Pointer to the register.
5244 */
5245DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5246{
5247 pReg->au32[0] = UINT32_C(0x00000000);
5248 pReg->au32[1] = UINT32_C(0xc0000000);
5249 pReg->au16[4] = UINT16_C(0xffff);
5250}
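
/*
 * Illustrative note (not from the original source): the pattern stored above is
 * the x87 "real indefinite" QNaN - sign=1, exponent=0x7fff, mantissa
 * 0xc000000000000000 - written through the little-endian au32/au16 aliases of
 * the 80-bit register.
 */
#if 0 /* example only */
    RTFLOAT80U r80Tmp;
    iemFpuStoreQNan(&r80Tmp);
    Assert(r80Tmp.au16[4] == UINT16_C(0xffff));                     /* sign + exponent */
    Assert(r80Tmp.s.u64Mantissa == UINT64_C(0xc000000000000000));   /* QNaN mantissa   */
#endif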
5251
5252
5253/**
5254 * Updates the FOP, FPU.CS and FPUIP registers.
5255 *
5256 * @param pIemCpu The IEM per CPU data.
5257 * @param pCtx The CPU context.
5258 * @param pFpuCtx The FPU context.
5259 */
5260DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5261{
5262 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5263 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5264    /** @todo x87.CS and FPUIP need to be kept separately. */
5265 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5266 {
5267 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5268 * happens in real mode here based on the fnsave and fnstenv images. */
5269 pFpuCtx->CS = 0;
5270 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5271 }
5272 else
5273 {
5274 pFpuCtx->CS = pCtx->cs.Sel;
5275 pFpuCtx->FPUIP = pCtx->rip;
5276 }
5277}
5278
5279
5280/**
5281 * Updates the x87.DS and FPUDP registers.
5282 *
5283 * @param pIemCpu The IEM per CPU data.
5284 * @param pCtx The CPU context.
5285 * @param pFpuCtx The FPU context.
5286 * @param iEffSeg The effective segment register.
5287 * @param GCPtrEff The effective address relative to @a iEffSeg.
5288 */
5289DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5290{
5291 RTSEL sel;
5292 switch (iEffSeg)
5293 {
5294 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5295 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5296 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5297 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5298 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5299 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5300 default:
5301 AssertMsgFailed(("%d\n", iEffSeg));
5302 sel = pCtx->ds.Sel;
5303 }
5304    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5305 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5306 {
5307 pFpuCtx->DS = 0;
5308 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5309 }
5310 else
5311 {
5312 pFpuCtx->DS = sel;
5313 pFpuCtx->FPUDP = GCPtrEff;
5314 }
5315}
5316
5317
5318/**
5319 * Rotates the stack registers in the push direction.
5320 *
5321 * @param pFpuCtx The FPU context.
5322 * @remarks This is a complete waste of time, but fxsave stores the registers in
5323 * stack order.
5324 */
5325DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5326{
5327 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5328 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5329 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5330 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5331 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5332 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5333 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5334 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5335 pFpuCtx->aRegs[0].r80 = r80Tmp;
5336}
5337
5338
5339/**
5340 * Rotates the stack registers in the pop direction.
5341 *
5342 * @param pFpuCtx The FPU context.
5343 * @remarks This is a complete waste of time, but fxsave stores the registers in
5344 * stack order.
5345 */
5346DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5347{
5348 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5349 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5350 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5351 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5352 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5353 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5354 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5355 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5356 pFpuCtx->aRegs[7].r80 = r80Tmp;
5357}
5358
5359
5360/**
5361 * Updates FSW and pushes an FPU result onto the FPU stack if no pending
5362 * exception prevents it.
5363 *
5364 * @param pIemCpu The IEM per CPU data.
5365 * @param pResult The FPU operation result to push.
5366 * @param pFpuCtx The FPU context.
5367 */
5368IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5369{
5370 /* Update FSW and bail if there are pending exceptions afterwards. */
5371 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5372 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5373 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5374 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5375 {
5376 pFpuCtx->FSW = fFsw;
5377 return;
5378 }
5379
5380 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5381 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5382 {
5383 /* All is fine, push the actual value. */
5384 pFpuCtx->FTW |= RT_BIT(iNewTop);
5385 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5386 }
5387 else if (pFpuCtx->FCW & X86_FCW_IM)
5388 {
5389 /* Masked stack overflow, push QNaN. */
5390 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5391 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5392 }
5393 else
5394 {
5395 /* Raise stack overflow, don't push anything. */
5396 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5397 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5398 return;
5399 }
5400
5401 fFsw &= ~X86_FSW_TOP_MASK;
5402 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5403 pFpuCtx->FSW = fFsw;
5404
5405 iemFpuRotateStackPush(pFpuCtx);
5406}
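
/*
 * Illustrative note (not from the original source): the gating test above works
 * because IE/DE/ZE occupy the same bit positions in FSW as IM/DM/ZM do in FCW,
 * so "pending status & ~mask" is non-zero exactly when an exception is both
 * pending and unmasked.  Minimal sketch, as it would read inside the helper
 * above and under that bit-position assumption:
 */
#if 0 /* example only */
    uint16_t const fPending  = pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE);
    uint16_t const fMasked   = pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM);
    bool     const fUnmasked = RT_BOOL(fPending & ~fMasked);   /* true => leave the stack alone */
#endif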
5407
5408
5409/**
5410 * Stores a result in an FPU register and updates the FSW and FTW.
5411 *
5412 * @param pFpuCtx The FPU context.
5413 * @param pResult The result to store.
5414 * @param iStReg Which FPU register to store it in.
5415 */
5416IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5417{
5418 Assert(iStReg < 8);
5419 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5420 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5421 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5422 pFpuCtx->FTW |= RT_BIT(iReg);
5423 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5424}
5425
5426
5427/**
5428 * Only updates the FPU status word (FSW) with the result of the current
5429 * instruction.
5430 *
5431 * @param pFpuCtx The FPU context.
5432 * @param u16FSW The FSW output of the current instruction.
5433 */
5434IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5435{
5436 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5437 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5438}
5439
5440
5441/**
5442 * Pops one item off the FPU stack if no pending exception prevents it.
5443 *
5444 * @param pFpuCtx The FPU context.
5445 */
5446IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5447{
5448 /* Check pending exceptions. */
5449 uint16_t uFSW = pFpuCtx->FSW;
5450 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5451 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5452 return;
5453
5454    /* Pop the stack: TOP is incremented by one, modulo 8 (the +9 below is +1 once masked). */
5455 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5456 uFSW &= ~X86_FSW_TOP_MASK;
5457 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5458 pFpuCtx->FSW = uFSW;
5459
5460 /* Mark the previous ST0 as empty. */
5461 iOldTop >>= X86_FSW_TOP_SHIFT;
5462 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5463
5464 /* Rotate the registers. */
5465 iemFpuRotateStackPop(pFpuCtx);
5466}
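
/*
 * Illustrative note (not from the original source): the "+ 9" above is +1
 * modulo 8 once the sum is masked back into the 3-bit TOP field, so popping
 * advances TOP by one.  Small sketch using the same macros:
 */
#if 0 /* example only */
    uint16_t uFswTmp = (uint16_t)(7 << X86_FSW_TOP_SHIFT);                                   /* TOP = 7 */
    uFswTmp = (uint16_t)((uFswTmp + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK);
    Assert(X86_FSW_TOP_GET(uFswTmp) == 0);                                                   /* 7 + 1 wraps to 0 */
#endif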
5467
5468
5469/**
5470 * Pushes an FPU result onto the FPU stack if no pending exception prevents it.
5471 *
5472 * @param pIemCpu The IEM per CPU data.
5473 * @param pResult The FPU operation result to push.
5474 */
5475IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5476{
5477 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5478 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5479 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5480 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5481}
5482
5483
5484/**
5485 * Pushes an FPU result onto the FPU stack if no pending exception prevents it,
5486 * and sets FPUDP and FPUDS.
5487 *
5488 * @param pIemCpu The IEM per CPU data.
5489 * @param pResult The FPU operation result to push.
5490 * @param iEffSeg The effective segment register.
5491 * @param GCPtrEff The effective address relative to @a iEffSeg.
5492 */
5493IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5494{
5495 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5496 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5497 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5498 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5499 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5500}
5501
5502
5503/**
5504 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5505 * unless a pending exception prevents it.
5506 *
5507 * @param pIemCpu The IEM per CPU data.
5508 * @param pResult The FPU operation result to store and push.
5509 */
5510IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5511{
5512 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5513 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5514 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5515
5516 /* Update FSW and bail if there are pending exceptions afterwards. */
5517 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5518 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5519 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5520 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5521 {
5522 pFpuCtx->FSW = fFsw;
5523 return;
5524 }
5525
5526 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5527 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5528 {
5529 /* All is fine, push the actual value. */
5530 pFpuCtx->FTW |= RT_BIT(iNewTop);
5531 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5532 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5533 }
5534 else if (pFpuCtx->FCW & X86_FCW_IM)
5535 {
5536 /* Masked stack overflow, push QNaN. */
5537 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5538 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5539 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5540 }
5541 else
5542 {
5543 /* Raise stack overflow, don't push anything. */
5544 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5545 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5546 return;
5547 }
5548
5549 fFsw &= ~X86_FSW_TOP_MASK;
5550 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5551 pFpuCtx->FSW = fFsw;
5552
5553 iemFpuRotateStackPush(pFpuCtx);
5554}
5555
5556
5557/**
5558 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5559 * FOP.
5560 *
5561 * @param pIemCpu The IEM per CPU data.
5562 * @param pResult The result to store.
5563 * @param iStReg Which FPU register to store it in.
5564 */
5565IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5566{
5567 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5568 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5569 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5570 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5571}
5572
5573
5574/**
5575 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5576 * FOP, and then pops the stack.
5577 *
5578 * @param pIemCpu The IEM per CPU data.
5579 * @param pResult The result to store.
5580 * @param iStReg Which FPU register to store it in.
5581 */
5582IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5583{
5584 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5585 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5586 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5587 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5588 iemFpuMaybePopOne(pFpuCtx);
5589}
5590
5591
5592/**
5593 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5594 * FPUDP, and FPUDS.
5595 *
5596 * @param pIemCpu The IEM per CPU data.
5597 * @param pResult The result to store.
5598 * @param iStReg Which FPU register to store it in.
5599 * @param iEffSeg The effective memory operand selector register.
5600 * @param GCPtrEff The effective memory operand offset.
5601 */
5602IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5603 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5604{
5605 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5606 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5607 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5608 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5609 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5610}
5611
5612
5613/**
5614 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5615 * FPUDP, and FPUDS, and then pops the stack.
5616 *
5617 * @param pIemCpu The IEM per CPU data.
5618 * @param pResult The result to store.
5619 * @param iStReg Which FPU register to store it in.
5620 * @param iEffSeg The effective memory operand selector register.
5621 * @param GCPtrEff The effective memory operand offset.
5622 */
5623IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5624 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5625{
5626 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5627 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5628 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5629 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5630 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5631 iemFpuMaybePopOne(pFpuCtx);
5632}
5633
5634
5635/**
5636 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5637 *
5638 * @param pIemCpu The IEM per CPU data.
5639 */
5640IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5641{
5642 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5643 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5644 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5645}
5646
5647
5648/**
5649 * Marks the specified stack register as free (for FFREE).
5650 *
5651 * @param pIemCpu The IEM per CPU data.
5652 * @param iStReg The register to free.
5653 */
5654IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5655{
5656 Assert(iStReg < 8);
5657 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5658 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5659 pFpuCtx->FTW &= ~RT_BIT(iReg);
5660}
5661
5662
5663/**
5664 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5665 *
5666 * @param pIemCpu The IEM per CPU data.
5667 */
5668IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5669{
5670 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5671 uint16_t uFsw = pFpuCtx->FSW;
5672 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5673 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5674 uFsw &= ~X86_FSW_TOP_MASK;
5675 uFsw |= uTop;
5676 pFpuCtx->FSW = uFsw;
5677}
5678
5679
5680/**
5681 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5682 *
5683 * @param pIemCpu The IEM per CPU data.
5684 */
5685IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5686{
5687 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5688 uint16_t uFsw = pFpuCtx->FSW;
5689 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5690 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5691 uFsw &= ~X86_FSW_TOP_MASK;
5692 uFsw |= uTop;
5693 pFpuCtx->FSW = uFsw;
5694}
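
/*
 * Illustrative note (not from the original source): since TOP is a 3-bit field,
 * adding 1 (masked) as iemFpuStackIncTop does increments it (a pop), while
 * adding 7 (masked) as iemFpuStackDecTop does is the same as subtracting 1
 * (a push).  For instance:
 */
#if 0 /* example only */
    uint16_t uTopTmp = 0;                                                            /* TOP = 0 */
    uTopTmp = (uint16_t)((uTopTmp + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK);
    Assert(X86_FSW_TOP_GET(uTopTmp) == 7);                                           /* 0 - 1 wraps to 7 */
#endif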
5695
5696
5697/**
5698 * Updates the FSW, FOP, FPUIP, and FPUCS.
5699 *
5700 * @param pIemCpu The IEM per CPU data.
5701 * @param u16FSW The FSW from the current instruction.
5702 */
5703IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5704{
5705 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5706 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5707 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5708 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5709}
5710
5711
5712/**
5713 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5714 *
5715 * @param pIemCpu The IEM per CPU data.
5716 * @param u16FSW The FSW from the current instruction.
5717 */
5718IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5719{
5720 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5721 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5722 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5723 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5724 iemFpuMaybePopOne(pFpuCtx);
5725}
5726
5727
5728/**
5729 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5730 *
5731 * @param pIemCpu The IEM per CPU data.
5732 * @param u16FSW The FSW from the current instruction.
5733 * @param iEffSeg The effective memory operand selector register.
5734 * @param GCPtrEff The effective memory operand offset.
5735 */
5736IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5737{
5738 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5739 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5740 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5741 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5742 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5743}
5744
5745
5746/**
5747 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5748 *
5749 * @param pIemCpu The IEM per CPU data.
5750 * @param u16FSW The FSW from the current instruction.
5751 */
5752IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5753{
5754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5755 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5756 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5757 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5758 iemFpuMaybePopOne(pFpuCtx);
5759 iemFpuMaybePopOne(pFpuCtx);
5760}
5761
5762
5763/**
5764 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5765 *
5766 * @param pIemCpu The IEM per CPU data.
5767 * @param u16FSW The FSW from the current instruction.
5768 * @param iEffSeg The effective memory operand selector register.
5769 * @param GCPtrEff The effective memory operand offset.
5770 */
5771IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5772{
5773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5774 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5775 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5776 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5777 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5778 iemFpuMaybePopOne(pFpuCtx);
5779}
5780
5781
5782/**
5783 * Worker routine for raising an FPU stack underflow exception.
5784 *
5785 * @param pIemCpu The IEM per CPU data.
5786 * @param pFpuCtx The FPU context.
5787 * @param iStReg The stack register being accessed.
5788 */
5789IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5790{
5791 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5792 if (pFpuCtx->FCW & X86_FCW_IM)
5793 {
5794 /* Masked underflow. */
5795 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5796 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5797 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5798 if (iStReg != UINT8_MAX)
5799 {
5800 pFpuCtx->FTW |= RT_BIT(iReg);
5801 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5802 }
5803 }
5804 else
5805 {
5806 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5807 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5808 }
5809}
5810
5811
5812/**
5813 * Raises an FPU stack underflow exception.
5814 *
5815 * @param pIemCpu The IEM per CPU data.
5816 * @param iStReg The destination register that should be loaded
5817 * with QNaN if \#IS is not masked. Specify
5818 * UINT8_MAX if none (like for fcom).
5819 */
5820DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5821{
5822 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5823 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5824 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5825 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5826}
5827
5828
5829DECL_NO_INLINE(IEM_STATIC, void)
5830iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5831{
5832 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5833 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5834 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5835 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5836 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5837}
5838
5839
5840DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5841{
5842 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5843 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5844 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5845 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5846 iemFpuMaybePopOne(pFpuCtx);
5847}
5848
5849
5850DECL_NO_INLINE(IEM_STATIC, void)
5851iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5852{
5853 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5854 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5855 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5856 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5857 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5858 iemFpuMaybePopOne(pFpuCtx);
5859}
5860
5861
5862DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5863{
5864 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5865 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5866 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5867 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5868 iemFpuMaybePopOne(pFpuCtx);
5869 iemFpuMaybePopOne(pFpuCtx);
5870}
5871
5872
5873DECL_NO_INLINE(IEM_STATIC, void)
5874iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5875{
5876 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5877 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5878 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5879
5880 if (pFpuCtx->FCW & X86_FCW_IM)
5881 {
5882        /* Masked underflow - Push QNaN. */
5883 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5884 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5885 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5886 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5887 pFpuCtx->FTW |= RT_BIT(iNewTop);
5888 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5889 iemFpuRotateStackPush(pFpuCtx);
5890 }
5891 else
5892 {
5893 /* Exception pending - don't change TOP or the register stack. */
5894 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5895 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5896 }
5897}
5898
5899
5900DECL_NO_INLINE(IEM_STATIC, void)
5901iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5902{
5903 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5904 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5905 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5906
5907 if (pFpuCtx->FCW & X86_FCW_IM)
5908 {
5909        /* Masked underflow - Push QNaN. */
5910 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5911 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5912 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5913 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5914 pFpuCtx->FTW |= RT_BIT(iNewTop);
5915 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5916 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5917 iemFpuRotateStackPush(pFpuCtx);
5918 }
5919 else
5920 {
5921 /* Exception pending - don't change TOP or the register stack. */
5922 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5923 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5924 }
5925}
5926
5927
5928/**
5929 * Worker routine for raising an FPU stack overflow exception on a push.
5930 *
5931 * @param pFpuCtx The FPU context.
5932 */
5933IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5934{
5935 if (pFpuCtx->FCW & X86_FCW_IM)
5936 {
5937 /* Masked overflow. */
5938 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5939 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5940 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5941 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5942 pFpuCtx->FTW |= RT_BIT(iNewTop);
5943 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5944 iemFpuRotateStackPush(pFpuCtx);
5945 }
5946 else
5947 {
5948 /* Exception pending - don't change TOP or the register stack. */
5949 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5950 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5951 }
5952}
5953
5954
5955/**
5956 * Raises an FPU stack overflow exception on a push.
5957 *
5958 * @param pIemCpu The IEM per CPU data.
5959 */
5960DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5961{
5962 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5963 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5964 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5965 iemFpuStackPushOverflowOnly(pFpuCtx);
5966}
5967
5968
5969/**
5970 * Raises an FPU stack overflow exception on a push with a memory operand.
5971 *
5972 * @param pIemCpu The IEM per CPU data.
5973 * @param iEffSeg The effective memory operand selector register.
5974 * @param GCPtrEff The effective memory operand offset.
5975 */
5976DECL_NO_INLINE(IEM_STATIC, void)
5977iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5978{
5979 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5980 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5981 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5982 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5983 iemFpuStackPushOverflowOnly(pFpuCtx);
5984}
5985
5986
5987IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5988{
5989 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5990 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5991 if (pFpuCtx->FTW & RT_BIT(iReg))
5992 return VINF_SUCCESS;
5993 return VERR_NOT_FOUND;
5994}
5995
5996
5997IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5998{
5999 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6000 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6001 if (pFpuCtx->FTW & RT_BIT(iReg))
6002 {
6003 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
6004 return VINF_SUCCESS;
6005 }
6006 return VERR_NOT_FOUND;
6007}
6008
6009
6010IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
6011 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
6012{
6013 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6014 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6015 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6016 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6017 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6018 {
6019 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6020 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
6021 return VINF_SUCCESS;
6022 }
6023 return VERR_NOT_FOUND;
6024}
6025
6026
6027IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
6028{
6029 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6030 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6031 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6032 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6033 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6034 {
6035 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6036 return VINF_SUCCESS;
6037 }
6038 return VERR_NOT_FOUND;
6039}
6040
6041
6042/**
6043 * Updates the FPU exception status after FCW is changed.
6044 *
6045 * @param pFpuCtx The FPU context.
6046 */
6047IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
6048{
6049 uint16_t u16Fsw = pFpuCtx->FSW;
6050 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
6051 u16Fsw |= X86_FSW_ES | X86_FSW_B;
6052 else
6053 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
6054 pFpuCtx->FSW = u16Fsw;
6055}
6056
6057
6058/**
6059 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6060 *
6061 * @returns The full FTW.
6062 * @param pFpuCtx The FPU context.
6063 */
6064IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6065{
6066 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6067 uint16_t u16Ftw = 0;
6068 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6069 for (unsigned iSt = 0; iSt < 8; iSt++)
6070 {
6071 unsigned const iReg = (iSt + iTop) & 7;
6072 if (!(u8Ftw & RT_BIT(iReg)))
6073 u16Ftw |= 3 << (iReg * 2); /* empty */
6074 else
6075 {
6076 uint16_t uTag;
6077 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6078 if (pr80Reg->s.uExponent == 0x7fff)
6079 uTag = 2; /* Exponent is all 1's => Special. */
6080 else if (pr80Reg->s.uExponent == 0x0000)
6081 {
6082 if (pr80Reg->s.u64Mantissa == 0x0000)
6083 uTag = 1; /* All bits are zero => Zero. */
6084 else
6085 uTag = 2; /* Must be special. */
6086 }
6087 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6088 uTag = 0; /* Valid. */
6089 else
6090 uTag = 2; /* Must be special. */
6091
6092            u16Ftw |= uTag << (iReg * 2);
6093 }
6094 }
6095
6096 return u16Ftw;
6097}
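
/*
 * Illustrative note (not from the original source): the two-bit tags built
 * above follow the FSAVE/FNSTENV encoding - 0=valid, 1=zero, 2=special (NaN,
 * infinity, denormal, unnormal), 3=empty.  Made-up example value:
 */
#if 0 /* example only */
    /* Registers 0 and 1 in use (tags 0 and 1), registers 2-7 empty (tag 3):
       11 11 11 11 11 11 01 00 (binary) == 0xfff4. */
    uint16_t const u16FtwExample = UINT16_C(0xfff4);
#endif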
6098
6099
6100/**
6101 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6102 *
6103 * @returns The compressed FTW.
6104 * @param u16FullFtw The full FTW to convert.
6105 */
6106IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6107{
6108 uint8_t u8Ftw = 0;
6109 for (unsigned i = 0; i < 8; i++)
6110 {
6111 if ((u16FullFtw & 3) != 3 /*empty*/)
6112 u8Ftw |= RT_BIT(i);
6113 u16FullFtw >>= 2;
6114 }
6115
6116 return u8Ftw;
6117}
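
/*
 * Illustrative note (not from the original source): compression keeps one bit
 * per register, set whenever the full two-bit tag is anything but 3 (empty).
 * Reusing the made-up 0xfff4 value from the example above:
 */
#if 0 /* example only */
    Assert(iemFpuCompressFtw(UINT16_C(0xfff4)) == 0x03);   /* registers 0 and 1 in use */
#endif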
6118
6119/** @} */
6120
6121
6122/** @name Memory access.
6123 *
6124 * @{
6125 */
6126
6127
6128/**
6129 * Updates the IEMCPU::cbWritten counter if applicable.
6130 *
6131 * @param pIemCpu The IEM per CPU data.
6132 * @param fAccess The access being accounted for.
6133 * @param cbMem The access size.
6134 */
6135DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6136{
6137 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6138 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6139 pIemCpu->cbWritten += (uint32_t)cbMem;
6140}
6141
6142
6143/**
6144 * Checks if the given segment can be written to, raising the appropriate
6145 * exception if not.
6146 *
6147 * @returns VBox strict status code.
6148 *
6149 * @param pIemCpu The IEM per CPU data.
6150 * @param pHid Pointer to the hidden register.
6151 * @param iSegReg The register number.
6152 * @param pu64BaseAddr Where to return the base address to use for the
6153 * segment. (In 64-bit code it may differ from the
6154 * base in the hidden segment.)
6155 */
6156IEM_STATIC VBOXSTRICTRC
6157iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6158{
6159 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6160 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6161 else
6162 {
6163 if (!pHid->Attr.n.u1Present)
6164 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6165
6166 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6167 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6168 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6169 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6170 *pu64BaseAddr = pHid->u64Base;
6171 }
6172 return VINF_SUCCESS;
6173}
6174
6175
6176/**
6177 * Checks if the given segment can be read from, raising the appropriate
6178 * exception if not.
6179 *
6180 * @returns VBox strict status code.
6181 *
6182 * @param pIemCpu The IEM per CPU data.
6183 * @param pHid Pointer to the hidden register.
6184 * @param iSegReg The register number.
6185 * @param pu64BaseAddr Where to return the base address to use for the
6186 * segment. (In 64-bit code it may differ from the
6187 * base in the hidden segment.)
6188 */
6189IEM_STATIC VBOXSTRICTRC
6190iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6191{
6192 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6193 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6194 else
6195 {
6196 if (!pHid->Attr.n.u1Present)
6197 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6198
6199 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6200 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6201 *pu64BaseAddr = pHid->u64Base;
6202 }
6203 return VINF_SUCCESS;
6204}
6205
6206
6207/**
6208 * Applies the segment limit, base and attributes.
6209 *
6210 * This may raise a \#GP or \#SS.
6211 *
6212 * @returns VBox strict status code.
6213 *
6214 * @param pIemCpu The IEM per CPU data.
6215 * @param fAccess The kind of access which is being performed.
6216 * @param iSegReg The index of the segment register to apply.
6217 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6218 * TSS, ++).
6219 * @param cbMem The access size.
6220 * @param pGCPtrMem Pointer to the guest memory address to apply
6221 * segmentation to. Input and output parameter.
6222 */
6223IEM_STATIC VBOXSTRICTRC
6224iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6225{
6226 if (iSegReg == UINT8_MAX)
6227 return VINF_SUCCESS;
6228
6229 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6230 switch (pIemCpu->enmCpuMode)
6231 {
6232 case IEMMODE_16BIT:
6233 case IEMMODE_32BIT:
6234 {
6235 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6236 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6237
6238 Assert(pSel->Attr.n.u1Present);
6239 Assert(pSel->Attr.n.u1DescType);
6240 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6241 {
6242 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6243 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6244 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6245
6246 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6247 {
6248 /** @todo CPL check. */
6249 }
6250
6251 /*
6252 * There are two kinds of data selectors, normal and expand down.
6253 */
6254 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6255 {
6256 if ( GCPtrFirst32 > pSel->u32Limit
6257 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6258 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6259 }
6260 else
6261 {
6262 /*
6263 * The upper boundary is defined by the B bit, not the G bit!
6264 */
6265 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6266 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6267 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6268 }
6269 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6270 }
6271 else
6272 {
6273
6274 /*
6275                 * A code selector can usually be used to read through; writing is
6276                 * only permitted in real and V8086 mode.
6277 */
6278 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6279 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6280 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6281 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6282 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6283
6284 if ( GCPtrFirst32 > pSel->u32Limit
6285 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6286 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6287
6288 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6289 {
6290 /** @todo CPL check. */
6291 }
6292
6293 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6294 }
6295 return VINF_SUCCESS;
6296 }
6297
6298 case IEMMODE_64BIT:
6299 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6300 *pGCPtrMem += pSel->u64Base;
6301 return VINF_SUCCESS;
6302
6303 default:
6304 AssertFailedReturn(VERR_IEM_IPE_7);
6305 }
6306}
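
/*
 * Illustrative note (not from the original source): for an expand-down data
 * segment the valid offsets are (limit, upper-bound], the opposite of a normal
 * segment.  Made-up example: with u32Limit=0x0fff and the B bit clear the
 * accessible range is 0x1000..0xffff, so the check above rejects an access at
 * 0x0800 and accepts one at 0x2000 (as long as it does not run past 0xffff).
 */
#if 0 /* example only, made-up values */
    uint32_t const uLimit   = UINT32_C(0x0fff);
    uint32_t const uUpper   = UINT32_C(0xffff);                 /* B bit clear */
    uint32_t const offFirst = UINT32_C(0x2000);
    uint32_t const offLast  = offFirst + 4 - 1;                 /* a dword access */
    bool     const fOk      = offFirst >= uLimit + 1 && offLast <= uUpper;
#endif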
6307
6308
6309/**
6310 * Translates a virtual address to a physical address and checks if we
6311 * can access the page as specified.
6312 *
6313 * @param pIemCpu The IEM per CPU data.
6314 * @param GCPtrMem The virtual address.
6315 * @param fAccess The intended access.
6316 * @param pGCPhysMem Where to return the physical address.
6317 */
6318IEM_STATIC VBOXSTRICTRC
6319iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6320{
6321 /** @todo Need a different PGM interface here. We're currently using
6322     * generic / REM interfaces. This won't cut it for R0 & RC. */
6323 RTGCPHYS GCPhys;
6324 uint64_t fFlags;
6325 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6326 if (RT_FAILURE(rc))
6327 {
6328 /** @todo Check unassigned memory in unpaged mode. */
6329 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6330 *pGCPhysMem = NIL_RTGCPHYS;
6331 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6332 }
6333
6334 /* If the page is writable and does not have the no-exec bit set, all
6335 access is allowed. Otherwise we'll have to check more carefully... */
6336 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6337 {
6338 /* Write to read only memory? */
6339 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6340 && !(fFlags & X86_PTE_RW)
6341 && ( pIemCpu->uCpl != 0
6342 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6343 {
6344 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6345 *pGCPhysMem = NIL_RTGCPHYS;
6346 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6347 }
6348
6349 /* Kernel memory accessed by userland? */
6350 if ( !(fFlags & X86_PTE_US)
6351 && pIemCpu->uCpl == 3
6352 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6353 {
6354 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6355 *pGCPhysMem = NIL_RTGCPHYS;
6356 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6357 }
6358
6359 /* Executing non-executable memory? */
6360 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6361 && (fFlags & X86_PTE_PAE_NX)
6362 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6363 {
6364 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6365 *pGCPhysMem = NIL_RTGCPHYS;
6366 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6367 VERR_ACCESS_DENIED);
6368 }
6369 }
6370
6371 /*
6372 * Set the dirty / access flags.
6373     * ASSUMES this is set when the address is translated rather than on commit...
6374 */
6375 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6376 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6377 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6378 {
6379 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6380 AssertRC(rc2);
6381 }
6382
6383 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6384 *pGCPhysMem = GCPhys;
6385 return VINF_SUCCESS;
6386}
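
/*
 * Illustrative note (not from the original source): the read-only check above
 * means a CPL-0 write to a read-only page only faults when CR0.WP is set,
 * while a write from any other ring to such a page is always refused.  Sketch
 * of the predicate as it appears inside the function above:
 */
#if 0 /* example only */
    bool const fWriteFault = (fAccess & IEM_ACCESS_TYPE_WRITE)
                          && !(fFlags & X86_PTE_RW)
                          && (   pIemCpu->uCpl != 0
                              || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP));
#endif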
6387
6388
6389
6390/**
6391 * Maps a physical page.
6392 *
6393 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6394 * @param pIemCpu The IEM per CPU data.
6395 * @param GCPhysMem The physical address.
6396 * @param fAccess The intended access.
6397 * @param ppvMem Where to return the mapping address.
6398 * @param pLock The PGM lock.
6399 */
6400IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6401{
6402#ifdef IEM_VERIFICATION_MODE_FULL
6403 /* Force the alternative path so we can ignore writes. */
6404 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6405 {
6406 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6407 {
6408 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6409 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6410 if (RT_FAILURE(rc2))
6411 pIemCpu->fProblematicMemory = true;
6412 }
6413 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6414 }
6415#endif
6416#ifdef IEM_LOG_MEMORY_WRITES
6417 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6418 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6419#endif
6420#ifdef IEM_VERIFICATION_MODE_MINIMAL
6421 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6422#endif
6423
6424 /** @todo This API may require some improving later. A private deal with PGM
6425     * regarding locking and unlocking needs to be struck. A couple of TLBs
6426 * living in PGM, but with publicly accessible inlined access methods
6427 * could perhaps be an even better solution. */
6428 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6429 GCPhysMem,
6430 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6431 pIemCpu->fBypassHandlers,
6432 ppvMem,
6433 pLock);
6434 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6435 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6436
6437#ifdef IEM_VERIFICATION_MODE_FULL
6438 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6439 pIemCpu->fProblematicMemory = true;
6440#endif
6441 return rc;
6442}
6443
6444
6445/**
6446 * Unmap a page previously mapped by iemMemPageMap.
6447 *
6448 * @param pIemCpu The IEM per CPU data.
6449 * @param GCPhysMem The physical address.
6450 * @param fAccess The intended access.
6451 * @param pvMem What iemMemPageMap returned.
6452 * @param pLock The PGM lock.
6453 */
6454DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6455{
6456 NOREF(pIemCpu);
6457 NOREF(GCPhysMem);
6458 NOREF(fAccess);
6459 NOREF(pvMem);
6460 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6461}
6462
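/* Illustrative sketch only (an assumption about typical usage, not taken from
 * this file): a balanced iemMemPageMap / iemMemPageUnmap pair for a direct,
 * read-only peek at a guest physical page. Variable names are hypothetical.
 *
 *     void           *pvPage;
 *     PGMPAGEMAPLOCK  Lock;
 *     int rc = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, &pvPage, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         uint8_t bByte = *(uint8_t const *)pvPage;   => use the direct mapping
 *         iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, pvPage, &Lock);
 *     }
 *     else the caller falls back on bounce buffering (see iemMemBounceBufferMapPhys).
 */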
6463
6464/**
6465 * Looks up a memory mapping entry.
6466 *
6467 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6468 * @param pIemCpu The IEM per CPU data.
6469 * @param pvMem The memory address.
6470 * @param   fAccess             The kind of access to match.
6471 */
6472DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6473{
6474 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
6475 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6476 if ( pIemCpu->aMemMappings[0].pv == pvMem
6477 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6478 return 0;
6479 if ( pIemCpu->aMemMappings[1].pv == pvMem
6480 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6481 return 1;
6482 if ( pIemCpu->aMemMappings[2].pv == pvMem
6483 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6484 return 2;
6485 return VERR_NOT_FOUND;
6486}
6487
6488
6489/**
6490 * Finds a free memmap entry when using iNextMapping doesn't work.
6491 *
6492 * @returns Memory mapping index, 1024 on failure.
6493 * @param pIemCpu The IEM per CPU data.
6494 */
6495IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6496{
6497 /*
6498 * The easy case.
6499 */
6500 if (pIemCpu->cActiveMappings == 0)
6501 {
6502 pIemCpu->iNextMapping = 1;
6503 return 0;
6504 }
6505
6506 /* There should be enough mappings for all instructions. */
6507 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6508
6509 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6510 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6511 return i;
6512
6513 AssertFailedReturn(1024);
6514}
6515
6516
6517/**
6518 * Commits a bounce buffer that needs writing back and unmaps it.
6519 *
6520 * @returns Strict VBox status code.
6521 * @param pIemCpu The IEM per CPU data.
6522 * @param iMemMap The index of the buffer to commit.
6523 */
6524IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6525{
6526 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6527 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6528
6529 /*
6530 * Do the writing.
6531 */
6532#ifndef IEM_VERIFICATION_MODE_MINIMAL
6533 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6534 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6535 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6536 {
6537 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6538 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6539 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6540 if (!pIemCpu->fBypassHandlers)
6541 {
6542 /*
6543 * Carefully and efficiently dealing with access handler return
6544             * codes makes this a little bloated.
6545 */
6546 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6547 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6548 pbBuf,
6549 cbFirst,
6550 PGMACCESSORIGIN_IEM);
6551 if (rcStrict == VINF_SUCCESS)
6552 {
6553 if (cbSecond)
6554 {
6555 rcStrict = PGMPhysWrite(pVM,
6556 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6557 pbBuf + cbFirst,
6558 cbSecond,
6559 PGMACCESSORIGIN_IEM);
6560 if (rcStrict == VINF_SUCCESS)
6561 { /* nothing */ }
6562 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6563 {
6564 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6565 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6566 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6567 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6568 }
6569 else
6570 {
6571 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6572 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6573 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6574 return rcStrict;
6575 }
6576 }
6577 }
6578 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6579 {
6580 if (!cbSecond)
6581 {
6582 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6583 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6584 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6585 }
6586 else
6587 {
6588 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6589 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6590 pbBuf + cbFirst,
6591 cbSecond,
6592 PGMACCESSORIGIN_IEM);
6593 if (rcStrict2 == VINF_SUCCESS)
6594 {
6595 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6596 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6597 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6598 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6599 }
6600 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6601 {
6602 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6603 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6604 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6605 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6606 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6607 }
6608 else
6609 {
6610 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6611 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6612 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6613 return rcStrict2;
6614 }
6615 }
6616 }
6617 else
6618 {
6619 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6620 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6621 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6622 return rcStrict;
6623 }
6624 }
6625 else
6626 {
6627 /*
6628 * No access handlers, much simpler.
6629 */
6630 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6631 if (RT_SUCCESS(rc))
6632 {
6633 if (cbSecond)
6634 {
6635 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6636 if (RT_SUCCESS(rc))
6637 { /* likely */ }
6638 else
6639 {
6640 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6641 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6642 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6643 return rc;
6644 }
6645 }
6646 }
6647 else
6648 {
6649 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6650 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6651 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6652 return rc;
6653 }
6654 }
6655 }
6656#endif
6657
6658#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6659 /*
6660 * Record the write(s).
6661 */
6662 if (!pIemCpu->fNoRem)
6663 {
6664 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6665 if (pEvtRec)
6666 {
6667 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6668 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6669 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6670 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6671 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6672 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6673 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6674 }
6675 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6676 {
6677 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6678 if (pEvtRec)
6679 {
6680 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6681 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6682 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6683 memcpy(pEvtRec->u.RamWrite.ab,
6684 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6685 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6686 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6687 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6688 }
6689 }
6690 }
6691#endif
6692#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6693 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6694 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6695 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6696 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6697 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6698 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6699
6700 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6701 g_cbIemWrote = cbWrote;
6702 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6703#endif
6704
6705 /*
6706 * Free the mapping entry.
6707 */
6708 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6709 Assert(pIemCpu->cActiveMappings != 0);
6710 pIemCpu->cActiveMappings--;
6711 return VINF_SUCCESS;
6712}
6713
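/* Summary sketch of the status code handling above (orientation only, no new
 * behavior): informational-but-successful PGM statuses are merged and queued
 * as pass-up statuses so the instruction can still complete, while hard
 * failures are returned immediately.
 *
 *     if (rcStrict2 == VINF_SUCCESS)
 *         ;                                                      => nothing to do
 *     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
 *     {
 *         PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);  => merge the two statuses
 *         rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
 *     }
 *     else
 *         return rcStrict2;                                      => hard failure
 */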
6714
6715/**
6716 * iemMemMap worker that deals with a request crossing pages.
6717 */
6718IEM_STATIC VBOXSTRICTRC
6719iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6720{
6721 /*
6722 * Do the address translations.
6723 */
6724 RTGCPHYS GCPhysFirst;
6725 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6726 if (rcStrict != VINF_SUCCESS)
6727 return rcStrict;
6728
6729 RTGCPHYS GCPhysSecond;
6730 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
6731 fAccess, &GCPhysSecond);
6732 if (rcStrict != VINF_SUCCESS)
6733 return rcStrict;
6734 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6735
6736 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6737#ifdef IEM_VERIFICATION_MODE_FULL
6738 /*
6739 * Detect problematic memory when verifying so we can select
6740 * the right execution engine. (TLB: Redo this.)
6741 */
6742 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6743 {
6744 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6745 if (RT_SUCCESS(rc2))
6746 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6747 if (RT_FAILURE(rc2))
6748 pIemCpu->fProblematicMemory = true;
6749 }
6750#endif
6751
6752
6753 /*
6754 * Read in the current memory content if it's a read, execute or partial
6755 * write access.
6756 */
6757 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6758 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6759 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6760
6761 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6762 {
6763 if (!pIemCpu->fBypassHandlers)
6764 {
6765 /*
6766 * Must carefully deal with access handler status codes here,
6767             * which makes the code a bit bloated.
6768 */
6769 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6770 if (rcStrict == VINF_SUCCESS)
6771 {
6772 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6773 if (rcStrict == VINF_SUCCESS)
6774 { /*likely */ }
6775 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6776 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6777 else
6778 {
6779                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6780 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6781 return rcStrict;
6782 }
6783 }
6784 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6785 {
6786 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6787 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6788 {
6789 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6790 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6791 }
6792 else
6793 {
6794                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6795                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6796 return rcStrict2;
6797 }
6798 }
6799 else
6800 {
6801                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6802 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6803 return rcStrict;
6804 }
6805 }
6806 else
6807 {
6808 /*
6809             * No informational status codes here, much more straightforward.
6810 */
6811 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6812 if (RT_SUCCESS(rc))
6813 {
6814 Assert(rc == VINF_SUCCESS);
6815 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6816 if (RT_SUCCESS(rc))
6817 Assert(rc == VINF_SUCCESS);
6818 else
6819 {
6820                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6821 return rc;
6822 }
6823 }
6824 else
6825 {
6826                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6827 return rc;
6828 }
6829 }
6830
6831#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6832 if ( !pIemCpu->fNoRem
6833 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6834 {
6835 /*
6836 * Record the reads.
6837 */
6838 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6839 if (pEvtRec)
6840 {
6841 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6842 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6843 pEvtRec->u.RamRead.cb = cbFirstPage;
6844 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6845 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6846 }
6847 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6848 if (pEvtRec)
6849 {
6850 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6851 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6852 pEvtRec->u.RamRead.cb = cbSecondPage;
6853 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6854 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6855 }
6856 }
6857#endif
6858 }
6859#ifdef VBOX_STRICT
6860 else
6861 memset(pbBuf, 0xcc, cbMem);
6862 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6863 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6864#endif
6865
6866 /*
6867 * Commit the bounce buffer entry.
6868 */
6869 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6870 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6871 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6872 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6873 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6874 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6875 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6876 pIemCpu->iNextMapping = iMemMap + 1;
6877 pIemCpu->cActiveMappings++;
6878
6879 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6880 *ppvMem = pbBuf;
6881 return VINF_SUCCESS;
6882}
6883
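/* Worked example (illustrative, assuming 4 KB pages): a 4 byte read at
 * GCPtrFirst = 0x00010FFE crosses into the next page, so
 *     cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 2
 *     cbSecondPage = cbMem - cbFirstPage                          = 2
 * and the bounce buffer receives the last 2 bytes of the first page followed
 * by the first 2 bytes of the second page.
 */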
6884
6885/**
6886 * iemMemMap worker that deals with iemMemPageMap failures.
6887 */
6888IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6889 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6890{
6891 /*
6892 * Filter out conditions we can handle and the ones which shouldn't happen.
6893 */
6894 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6895 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6896 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6897 {
6898 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6899 return rcMap;
6900 }
6901 pIemCpu->cPotentialExits++;
6902
6903 /*
6904 * Read in the current memory content if it's a read, execute or partial
6905 * write access.
6906 */
6907 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6908 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6909 {
6910 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6911 memset(pbBuf, 0xff, cbMem);
6912 else
6913 {
6914 int rc;
6915 if (!pIemCpu->fBypassHandlers)
6916 {
6917 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6918 if (rcStrict == VINF_SUCCESS)
6919 { /* nothing */ }
6920 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6921 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6922 else
6923 {
6924 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6925 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6926 return rcStrict;
6927 }
6928 }
6929 else
6930 {
6931 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6932 if (RT_SUCCESS(rc))
6933 { /* likely */ }
6934 else
6935 {
6936                     Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6937 GCPhysFirst, rc));
6938 return rc;
6939 }
6940 }
6941 }
6942
6943#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6944 if ( !pIemCpu->fNoRem
6945 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6946 {
6947 /*
6948 * Record the read.
6949 */
6950 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6951 if (pEvtRec)
6952 {
6953 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6954 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6955 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6956 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6957 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6958 }
6959 }
6960#endif
6961 }
6962#ifdef VBOX_STRICT
6963    else
6964        memset(pbBuf, 0xcc, cbMem);
6967    if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6968        memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6969#endif
6970
6971 /*
6972 * Commit the bounce buffer entry.
6973 */
6974 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6975 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6976 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6977 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6978 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6979 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6980 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6981 pIemCpu->iNextMapping = iMemMap + 1;
6982 pIemCpu->cActiveMappings++;
6983
6984 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6985 *ppvMem = pbBuf;
6986 return VINF_SUCCESS;
6987}
6988
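/* Orientation note (summarizing the checks above; the exact meaning of the PGM
 * statuses is stated here as an assumption for illustration): this bounce path
 * is taken when iemMemPageMap cannot hand out a direct mapping, i.e. for
 *     VERR_PGM_PHYS_TLB_CATCH_WRITE  - writes must go through access handlers,
 *     VERR_PGM_PHYS_TLB_CATCH_ALL    - all access must go through handlers,
 *     VERR_PGM_PHYS_TLB_UNASSIGNED   - no backing at all (reads return 0xff).
 * The data lives in aBounceBuffers[iMemMap] and any write back happens in
 * iemMemBounceBufferCommitAndUnmap().
 */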
6989
6990
6991/**
6992 * Maps the specified guest memory for the given kind of access.
6993 *
6994 * This may be using bounce buffering of the memory if it's crossing a page
6995 * boundary or if there is an access handler installed for any of it. Because
6996 * of lock prefix guarantees, we're in for some extra clutter when this
6997 * happens.
6998 *
6999 * This may raise a \#GP, \#SS, \#PF or \#AC.
7000 *
7001 * @returns VBox strict status code.
7002 *
7003 * @param pIemCpu The IEM per CPU data.
7004 * @param ppvMem Where to return the pointer to the mapped
7005 * memory.
7006 * @param cbMem The number of bytes to map. This is usually 1,
7007 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7008 * string operations it can be up to a page.
7009 * @param iSegReg The index of the segment register to use for
7010 * this access. The base and limits are checked.
7011 * Use UINT8_MAX to indicate that no segmentation
7012 * is required (for IDT, GDT and LDT accesses).
7013 * @param GCPtrMem The address of the guest memory.
7014 * @param fAccess How the memory is being accessed. The
7015 * IEM_ACCESS_TYPE_XXX bit is used to figure out
7016 * how to map the memory, while the
7017 * IEM_ACCESS_WHAT_XXX bit is used when raising
7018 * exceptions.
7019 */
7020IEM_STATIC VBOXSTRICTRC
7021iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
7022{
7023 /*
7024 * Check the input and figure out which mapping entry to use.
7025 */
7026 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7027 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
7028 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
7029
7030 unsigned iMemMap = pIemCpu->iNextMapping;
7031 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
7032 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7033 {
7034 iMemMap = iemMemMapFindFree(pIemCpu);
7035 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
7036 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
7037 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
7038 pIemCpu->aMemMappings[2].fAccess),
7039 VERR_IEM_IPE_9);
7040 }
7041
7042 /*
7043 * Map the memory, checking that we can actually access it. If something
7044 * slightly complicated happens, fall back on bounce buffering.
7045 */
7046 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7047 if (rcStrict != VINF_SUCCESS)
7048 return rcStrict;
7049
7050 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
7051 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
7052
7053 RTGCPHYS GCPhysFirst;
7054 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
7055 if (rcStrict != VINF_SUCCESS)
7056 return rcStrict;
7057
7058 void *pvMem;
7059 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7060 if (rcStrict != VINF_SUCCESS)
7061 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7062
7063 /*
7064 * Fill in the mapping table entry.
7065 */
7066 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7067 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7068 pIemCpu->iNextMapping = iMemMap + 1;
7069 pIemCpu->cActiveMappings++;
7070
7071 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7072 *ppvMem = pvMem;
7073 return VINF_SUCCESS;
7074}
7075
7076
7077/**
7078 * Commits the guest memory if bounce buffered and unmaps it.
7079 *
7080 * @returns Strict VBox status code.
7081 * @param pIemCpu The IEM per CPU data.
7082 * @param pvMem The mapping.
7083 * @param fAccess The kind of access.
7084 */
7085IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7086{
7087 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7088 AssertReturn(iMemMap >= 0, iMemMap);
7089
7090 /* If it's bounce buffered, we may need to write back the buffer. */
7091 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7092 {
7093 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7094 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7095 }
7096 /* Otherwise unlock it. */
7097 else
7098 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7099
7100 /* Free the entry. */
7101 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7102 Assert(pIemCpu->cActiveMappings != 0);
7103 pIemCpu->cActiveMappings--;
7104 return VINF_SUCCESS;
7105}
7106
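/* Illustrative read-modify-write sketch (an assumption mirroring the fetch and
 * store helpers below, not a specific caller; it presumes the usual
 * IEM_ACCESS_DATA_RW access constant):
 *
 *     uint32_t    *pu32;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32, sizeof(*pu32),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_RW);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32 |= RT_BIT_32(0);     => modify the mapped (or bounce buffered) data
 *         rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32, IEM_ACCESS_DATA_RW);
 *     }
 */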
7107
7108/**
7109 * Rolls back mappings, releasing page locks and such.
7110 *
7111 * The caller shall only call this after checking cActiveMappings.
7112 *
7114 * @param pIemCpu The IEM per CPU data.
7115 */
7116IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7117{
7118 Assert(pIemCpu->cActiveMappings > 0);
7119
7120 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7121 while (iMemMap-- > 0)
7122 {
7123 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7124 if (fAccess != IEM_ACCESS_INVALID)
7125 {
7126 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7127 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7128 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7129 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7130 Assert(pIemCpu->cActiveMappings > 0);
7131 pIemCpu->cActiveMappings--;
7132 }
7133 }
7134}
7135
7136
7137/**
7138 * Fetches a data byte.
7139 *
7140 * @returns Strict VBox status code.
7141 * @param pIemCpu The IEM per CPU data.
7142 * @param pu8Dst Where to return the byte.
7143 * @param iSegReg The index of the segment register to use for
7144 * this access. The base and limits are checked.
7145 * @param GCPtrMem The address of the guest memory.
7146 */
7147IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7148{
7149 /* The lazy approach for now... */
7150 uint8_t const *pu8Src;
7151 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7152 if (rc == VINF_SUCCESS)
7153 {
7154 *pu8Dst = *pu8Src;
7155 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7156 }
7157 return rc;
7158}
7159
7160
7161/**
7162 * Fetches a data word.
7163 *
7164 * @returns Strict VBox status code.
7165 * @param pIemCpu The IEM per CPU data.
7166 * @param pu16Dst Where to return the word.
7167 * @param iSegReg The index of the segment register to use for
7168 * this access. The base and limits are checked.
7169 * @param GCPtrMem The address of the guest memory.
7170 */
7171IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7172{
7173 /* The lazy approach for now... */
7174 uint16_t const *pu16Src;
7175 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7176 if (rc == VINF_SUCCESS)
7177 {
7178 *pu16Dst = *pu16Src;
7179 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7180 }
7181 return rc;
7182}
7183
7184
7185/**
7186 * Fetches a data dword.
7187 *
7188 * @returns Strict VBox status code.
7189 * @param pIemCpu The IEM per CPU data.
7190 * @param pu32Dst Where to return the dword.
7191 * @param iSegReg The index of the segment register to use for
7192 * this access. The base and limits are checked.
7193 * @param GCPtrMem The address of the guest memory.
7194 */
7195IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7196{
7197 /* The lazy approach for now... */
7198 uint32_t const *pu32Src;
7199 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7200 if (rc == VINF_SUCCESS)
7201 {
7202 *pu32Dst = *pu32Src;
7203 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7204 }
7205 return rc;
7206}
7207
7208
7209#ifdef SOME_UNUSED_FUNCTION
7210/**
7211 * Fetches a data dword and sign extends it to a qword.
7212 *
7213 * @returns Strict VBox status code.
7214 * @param pIemCpu The IEM per CPU data.
7215 * @param pu64Dst Where to return the sign extended value.
7216 * @param iSegReg The index of the segment register to use for
7217 * this access. The base and limits are checked.
7218 * @param GCPtrMem The address of the guest memory.
7219 */
7220IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7221{
7222 /* The lazy approach for now... */
7223 int32_t const *pi32Src;
7224 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7225 if (rc == VINF_SUCCESS)
7226 {
7227 *pu64Dst = *pi32Src;
7228 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7229 }
7230#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7231 else
7232 *pu64Dst = 0;
7233#endif
7234 return rc;
7235}
7236#endif
7237
7238
7239/**
7240 * Fetches a data qword.
7241 *
7242 * @returns Strict VBox status code.
7243 * @param pIemCpu The IEM per CPU data.
7244 * @param pu64Dst Where to return the qword.
7245 * @param iSegReg The index of the segment register to use for
7246 * this access. The base and limits are checked.
7247 * @param GCPtrMem The address of the guest memory.
7248 */
7249IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7250{
7251 /* The lazy approach for now... */
7252 uint64_t const *pu64Src;
7253 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7254 if (rc == VINF_SUCCESS)
7255 {
7256 *pu64Dst = *pu64Src;
7257 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7258 }
7259 return rc;
7260}
7261
7262
7263/**
7264 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7265 *
7266 * @returns Strict VBox status code.
7267 * @param pIemCpu The IEM per CPU data.
7268 * @param pu64Dst Where to return the qword.
7269 * @param iSegReg The index of the segment register to use for
7270 * this access. The base and limits are checked.
7271 * @param GCPtrMem The address of the guest memory.
7272 */
7273IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7274{
7275 /* The lazy approach for now... */
7276 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7277 if (RT_UNLIKELY(GCPtrMem & 15))
7278 return iemRaiseGeneralProtectionFault0(pIemCpu);
7279
7280 uint64_t const *pu64Src;
7281 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7282 if (rc == VINF_SUCCESS)
7283 {
7284 *pu64Dst = *pu64Src;
7285 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7286 }
7287 return rc;
7288}
7289
7290
7291/**
7292 * Fetches a data tword.
7293 *
7294 * @returns Strict VBox status code.
7295 * @param pIemCpu The IEM per CPU data.
7296 * @param pr80Dst Where to return the tword.
7297 * @param iSegReg The index of the segment register to use for
7298 * this access. The base and limits are checked.
7299 * @param GCPtrMem The address of the guest memory.
7300 */
7301IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7302{
7303 /* The lazy approach for now... */
7304 PCRTFLOAT80U pr80Src;
7305 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7306 if (rc == VINF_SUCCESS)
7307 {
7308 *pr80Dst = *pr80Src;
7309 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7310 }
7311 return rc;
7312}
7313
7314
7315/**
7316 * Fetches a data dqword (double qword), generally SSE related.
7317 *
7318 * @returns Strict VBox status code.
7319 * @param pIemCpu The IEM per CPU data.
7320 * @param   pu128Dst            Where to return the dqword.
7321 * @param iSegReg The index of the segment register to use for
7322 * this access. The base and limits are checked.
7323 * @param GCPtrMem The address of the guest memory.
7324 */
7325IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7326{
7327 /* The lazy approach for now... */
7328 uint128_t const *pu128Src;
7329 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7330 if (rc == VINF_SUCCESS)
7331 {
7332 *pu128Dst = *pu128Src;
7333 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7334 }
7335 return rc;
7336}
7337
7338
7339/**
7340 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7341 * related.
7342 *
7343 * Raises \#GP(0) if not aligned.
7344 *
7345 * @returns Strict VBox status code.
7346 * @param pIemCpu The IEM per CPU data.
7347 * @param   pu128Dst            Where to return the dqword.
7348 * @param iSegReg The index of the segment register to use for
7349 * this access. The base and limits are checked.
7350 * @param GCPtrMem The address of the guest memory.
7351 */
7352IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7353{
7354 /* The lazy approach for now... */
7355 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7356 if ( (GCPtrMem & 15)
7357 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7358 return iemRaiseGeneralProtectionFault0(pIemCpu);
7359
7360 uint128_t const *pu128Src;
7361 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7362 if (rc == VINF_SUCCESS)
7363 {
7364 *pu128Dst = *pu128Src;
7365 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7366 }
7367 return rc;
7368}
7369
7370
7371
7372
7373/**
7374 * Fetches a descriptor register (lgdt, lidt).
7375 *
7376 * @returns Strict VBox status code.
7377 * @param pIemCpu The IEM per CPU data.
7378 * @param pcbLimit Where to return the limit.
7379 * @param pGCPtrBase Where to return the base.
7380 * @param iSegReg The index of the segment register to use for
7381 * this access. The base and limits are checked.
7382 * @param GCPtrMem The address of the guest memory.
7383 * @param enmOpSize The effective operand size.
7384 */
7385IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7386 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7387{
7388 uint8_t const *pu8Src;
7389 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7390 (void **)&pu8Src,
7391 enmOpSize == IEMMODE_64BIT
7392 ? 2 + 8
7393 : enmOpSize == IEMMODE_32BIT
7394 ? 2 + 4
7395 : 2 + 3,
7396 iSegReg,
7397 GCPtrMem,
7398 IEM_ACCESS_DATA_R);
7399 if (rcStrict == VINF_SUCCESS)
7400 {
7401 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7402 switch (enmOpSize)
7403 {
7404 case IEMMODE_16BIT:
7405 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7406 break;
7407 case IEMMODE_32BIT:
7408 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7409 break;
7410 case IEMMODE_64BIT:
7411 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7412 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7413 break;
7414
7415 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7416 }
7417 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7418 }
7419 return rcStrict;
7420}
7421
7422
7423
7424/**
7425 * Stores a data byte.
7426 *
7427 * @returns Strict VBox status code.
7428 * @param pIemCpu The IEM per CPU data.
7429 * @param iSegReg The index of the segment register to use for
7430 * this access. The base and limits are checked.
7431 * @param GCPtrMem The address of the guest memory.
7432 * @param u8Value The value to store.
7433 */
7434IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7435{
7436 /* The lazy approach for now... */
7437 uint8_t *pu8Dst;
7438 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7439 if (rc == VINF_SUCCESS)
7440 {
7441 *pu8Dst = u8Value;
7442 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7443 }
7444 return rc;
7445}
7446
7447
7448/**
7449 * Stores a data word.
7450 *
7451 * @returns Strict VBox status code.
7452 * @param pIemCpu The IEM per CPU data.
7453 * @param iSegReg The index of the segment register to use for
7454 * this access. The base and limits are checked.
7455 * @param GCPtrMem The address of the guest memory.
7456 * @param u16Value The value to store.
7457 */
7458IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7459{
7460 /* The lazy approach for now... */
7461 uint16_t *pu16Dst;
7462 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7463 if (rc == VINF_SUCCESS)
7464 {
7465 *pu16Dst = u16Value;
7466 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7467 }
7468 return rc;
7469}
7470
7471
7472/**
7473 * Stores a data dword.
7474 *
7475 * @returns Strict VBox status code.
7476 * @param pIemCpu The IEM per CPU data.
7477 * @param iSegReg The index of the segment register to use for
7478 * this access. The base and limits are checked.
7479 * @param GCPtrMem The address of the guest memory.
7480 * @param u32Value The value to store.
7481 */
7482IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7483{
7484 /* The lazy approach for now... */
7485 uint32_t *pu32Dst;
7486 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7487 if (rc == VINF_SUCCESS)
7488 {
7489 *pu32Dst = u32Value;
7490 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7491 }
7492 return rc;
7493}
7494
7495
7496/**
7497 * Stores a data qword.
7498 *
7499 * @returns Strict VBox status code.
7500 * @param pIemCpu The IEM per CPU data.
7501 * @param iSegReg The index of the segment register to use for
7502 * this access. The base and limits are checked.
7503 * @param GCPtrMem The address of the guest memory.
7504 * @param u64Value The value to store.
7505 */
7506IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7507{
7508 /* The lazy approach for now... */
7509 uint64_t *pu64Dst;
7510 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7511 if (rc == VINF_SUCCESS)
7512 {
7513 *pu64Dst = u64Value;
7514 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7515 }
7516 return rc;
7517}
7518
7519
7520/**
7521 * Stores a data dqword.
7522 *
7523 * @returns Strict VBox status code.
7524 * @param pIemCpu The IEM per CPU data.
7525 * @param iSegReg The index of the segment register to use for
7526 * this access. The base and limits are checked.
7527 * @param GCPtrMem The address of the guest memory.
7528 * @param u128Value The value to store.
7529 */
7530IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7531{
7532 /* The lazy approach for now... */
7533 uint128_t *pu128Dst;
7534 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7535 if (rc == VINF_SUCCESS)
7536 {
7537 *pu128Dst = u128Value;
7538 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7539 }
7540 return rc;
7541}
7542
7543
7544/**
7545 * Stores a data dqword, SSE aligned.
7546 *
7547 * @returns Strict VBox status code.
7548 * @param pIemCpu The IEM per CPU data.
7549 * @param iSegReg The index of the segment register to use for
7550 * this access. The base and limits are checked.
7551 * @param GCPtrMem The address of the guest memory.
7552 * @param u128Value The value to store.
7553 */
7554IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7555{
7556 /* The lazy approach for now... */
7557 if ( (GCPtrMem & 15)
7558 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7559 return iemRaiseGeneralProtectionFault0(pIemCpu);
7560
7561 uint128_t *pu128Dst;
7562 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7563 if (rc == VINF_SUCCESS)
7564 {
7565 *pu128Dst = u128Value;
7566 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7567 }
7568 return rc;
7569}
7570
7571
7572/**
7573 * Stores a descriptor register (sgdt, sidt).
7574 *
7575 * @returns Strict VBox status code.
7576 * @param pIemCpu The IEM per CPU data.
7577 * @param cbLimit The limit.
7578 * @param GCPtrBase The base address.
7579 * @param iSegReg The index of the segment register to use for
7580 * this access. The base and limits are checked.
7581 * @param GCPtrMem The address of the guest memory.
7582 */
7583IEM_STATIC VBOXSTRICTRC
7584iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
7585{
7586 /*
7587     * The SIDT and SGDT instructions actually store the data using two
7588     * independent writes. The instructions do not respond to operand-size prefixes.
7589 */
7590 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pIemCpu, iSegReg, GCPtrMem, cbLimit);
7591 if (rcStrict == VINF_SUCCESS)
7592 {
7593 if (pIemCpu->enmCpuMode == IEMMODE_16BIT)
7594 rcStrict = iemMemStoreDataU32(pIemCpu, iSegReg, GCPtrMem + 2,
7595 IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_286
7596 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000)
7597 : (uint32_t)GCPtrBase & UINT32_C(0x00ffffff));
7598 else if (pIemCpu->enmCpuMode == IEMMODE_32BIT)
7599 rcStrict = iemMemStoreDataU32(pIemCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7600 else
7601            rcStrict = iemMemStoreDataU64(pIemCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7602 }
7603 return rcStrict;
7604}
7605
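/* Worked example (illustrative): a 32-bit guest executing SGDT with
 * GDTR.limit = 0x0027 and GDTR.base = 0x80001000 ends up with these six bytes
 * at GCPtrMem (little endian):
 *     +0: 27 00            => limit word
 *     +2: 00 10 00 80      => 32-bit base
 * In 16-bit code the base is stored as a dword too, but with the top byte
 * forced to 0xff on 286-class CPUs and masked to 24 bits on later ones, as
 * done above.
 */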
7606
7607/**
7608 * Pushes a word onto the stack.
7609 *
7610 * @returns Strict VBox status code.
7611 * @param pIemCpu The IEM per CPU data.
7612 * @param u16Value The value to push.
7613 */
7614IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7615{
7616    /* Decrement the stack pointer. */
7617 uint64_t uNewRsp;
7618 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7619 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7620
7621 /* Write the word the lazy way. */
7622 uint16_t *pu16Dst;
7623 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7624 if (rc == VINF_SUCCESS)
7625 {
7626 *pu16Dst = u16Value;
7627 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7628 }
7629
7630    /* Commit the new RSP value unless an access handler made trouble. */
7631 if (rc == VINF_SUCCESS)
7632 pCtx->rsp = uNewRsp;
7633
7634 return rc;
7635}
7636
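/* Worked example (illustrative, assuming a flat 32-bit SS): with ESP = 0x1000,
 * pushing a word yields GCPtrTop = uNewRsp = 0x0FFE; the value is written at
 * SS:0x0FFE and ESP is only updated to 0x0FFE once the write has succeeded.
 */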
7637
7638/**
7639 * Pushes a dword onto the stack.
7640 *
7641 * @returns Strict VBox status code.
7642 * @param pIemCpu The IEM per CPU data.
7643 * @param u32Value The value to push.
7644 */
7645IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7646{
7647    /* Decrement the stack pointer. */
7648 uint64_t uNewRsp;
7649 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7650 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7651
7652 /* Write the dword the lazy way. */
7653 uint32_t *pu32Dst;
7654 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7655 if (rc == VINF_SUCCESS)
7656 {
7657 *pu32Dst = u32Value;
7658 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7659 }
7660
7661    /* Commit the new RSP value unless an access handler made trouble. */
7662 if (rc == VINF_SUCCESS)
7663 pCtx->rsp = uNewRsp;
7664
7665 return rc;
7666}
7667
7668
7669/**
7670 * Pushes a dword segment register value onto the stack.
7671 *
7672 * @returns Strict VBox status code.
7673 * @param pIemCpu The IEM per CPU data.
7674 * @param u32Value The value to push.
7675 */
7676IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7677{
7678    /* Decrement the stack pointer. */
7679 uint64_t uNewRsp;
7680 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7681 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7682
7683 VBOXSTRICTRC rc;
7684 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7685 {
7686 /* The recompiler writes a full dword. */
7687 uint32_t *pu32Dst;
7688 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7689 if (rc == VINF_SUCCESS)
7690 {
7691 *pu32Dst = u32Value;
7692 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7693 }
7694 }
7695 else
7696 {
7697        /* The Intel docs talk about zero extending the selector register
7698           value. My actual Intel CPU here might be zero extending the value,
7699           but it still only writes the lower word... */
7700 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7701         * happens when crossing an electric page boundary: is the high word checked
7702 * for write accessibility or not? Probably it is. What about segment limits?
7703 * It appears this behavior is also shared with trap error codes.
7704 *
7705 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
7706 * ancient hardware when it actually did change. */
7707 uint16_t *pu16Dst;
7708 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7709 if (rc == VINF_SUCCESS)
7710 {
7711 *pu16Dst = (uint16_t)u32Value;
7712 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7713 }
7714 }
7715
7716    /* Commit the new RSP value unless an access handler made trouble. */
7717 if (rc == VINF_SUCCESS)
7718 pCtx->rsp = uNewRsp;
7719
7720 return rc;
7721}
7722
7723
7724/**
7725 * Pushes a qword onto the stack.
7726 *
7727 * @returns Strict VBox status code.
7728 * @param pIemCpu The IEM per CPU data.
7729 * @param u64Value The value to push.
7730 */
7731IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7732{
7733    /* Decrement the stack pointer. */
7734 uint64_t uNewRsp;
7735 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7736 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7737
7738    /* Write the qword the lazy way. */
7739 uint64_t *pu64Dst;
7740 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7741 if (rc == VINF_SUCCESS)
7742 {
7743 *pu64Dst = u64Value;
7744 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7745 }
7746
7747    /* Commit the new RSP value unless an access handler made trouble. */
7748 if (rc == VINF_SUCCESS)
7749 pCtx->rsp = uNewRsp;
7750
7751 return rc;
7752}
7753
7754
7755/**
7756 * Pops a word from the stack.
7757 *
7758 * @returns Strict VBox status code.
7759 * @param pIemCpu The IEM per CPU data.
7760 * @param pu16Value Where to store the popped value.
7761 */
7762IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7763{
7764 /* Increment the stack pointer. */
7765 uint64_t uNewRsp;
7766 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7767 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7768
7769    /* Fetch the word the lazy way. */
7770 uint16_t const *pu16Src;
7771 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7772 if (rc == VINF_SUCCESS)
7773 {
7774 *pu16Value = *pu16Src;
7775 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7776
7777 /* Commit the new RSP value. */
7778 if (rc == VINF_SUCCESS)
7779 pCtx->rsp = uNewRsp;
7780 }
7781
7782 return rc;
7783}
7784
7785
7786/**
7787 * Pops a dword from the stack.
7788 *
7789 * @returns Strict VBox status code.
7790 * @param pIemCpu The IEM per CPU data.
7791 * @param pu32Value Where to store the popped value.
7792 */
7793IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7794{
7795 /* Increment the stack pointer. */
7796 uint64_t uNewRsp;
7797 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7798 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7799
7800    /* Fetch the dword the lazy way. */
7801 uint32_t const *pu32Src;
7802 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7803 if (rc == VINF_SUCCESS)
7804 {
7805 *pu32Value = *pu32Src;
7806 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7807
7808 /* Commit the new RSP value. */
7809 if (rc == VINF_SUCCESS)
7810 pCtx->rsp = uNewRsp;
7811 }
7812
7813 return rc;
7814}
7815
7816
7817/**
7818 * Pops a qword from the stack.
7819 *
7820 * @returns Strict VBox status code.
7821 * @param pIemCpu The IEM per CPU data.
7822 * @param pu64Value Where to store the popped value.
7823 */
7824IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7825{
7826 /* Increment the stack pointer. */
7827 uint64_t uNewRsp;
7828 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7829 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7830
7831    /* Fetch the qword the lazy way. */
7832 uint64_t const *pu64Src;
7833 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7834 if (rc == VINF_SUCCESS)
7835 {
7836 *pu64Value = *pu64Src;
7837 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7838
7839 /* Commit the new RSP value. */
7840 if (rc == VINF_SUCCESS)
7841 pCtx->rsp = uNewRsp;
7842 }
7843
7844 return rc;
7845}
7846
7847
7848/**
7849 * Pushes a word onto the stack, using a temporary stack pointer.
7850 *
7851 * @returns Strict VBox status code.
7852 * @param pIemCpu The IEM per CPU data.
7853 * @param u16Value The value to push.
7854 * @param pTmpRsp Pointer to the temporary stack pointer.
7855 */
7856IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7857{
7858    /* Decrement the stack pointer. */
7859 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7860 RTUINT64U NewRsp = *pTmpRsp;
7861 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7862
7863 /* Write the word the lazy way. */
7864 uint16_t *pu16Dst;
7865 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7866 if (rc == VINF_SUCCESS)
7867 {
7868 *pu16Dst = u16Value;
7869 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7870 }
7871
7872    /* Commit the new RSP value unless an access handler made trouble. */
7873 if (rc == VINF_SUCCESS)
7874 *pTmpRsp = NewRsp;
7875
7876 return rc;
7877}
7878
7879
7880/**
7881 * Pushes a dword onto the stack, using a temporary stack pointer.
7882 *
7883 * @returns Strict VBox status code.
7884 * @param pIemCpu The IEM per CPU data.
7885 * @param u32Value The value to push.
7886 * @param pTmpRsp Pointer to the temporary stack pointer.
7887 */
7888IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7889{
7890    /* Decrement the stack pointer. */
7891 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7892 RTUINT64U NewRsp = *pTmpRsp;
7893 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7894
7895    /* Write the dword the lazy way. */
7896 uint32_t *pu32Dst;
7897 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7898 if (rc == VINF_SUCCESS)
7899 {
7900 *pu32Dst = u32Value;
7901 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7902 }
7903
7904    /* Commit the new RSP value unless an access handler made trouble. */
7905 if (rc == VINF_SUCCESS)
7906 *pTmpRsp = NewRsp;
7907
7908 return rc;
7909}
7910
7911
7912/**
7913 * Pushes a qword onto the stack, using a temporary stack pointer.
7914 *
7915 * @returns Strict VBox status code.
7916 * @param pIemCpu The IEM per CPU data.
7917 * @param u64Value The value to push.
7918 * @param pTmpRsp Pointer to the temporary stack pointer.
7919 */
7920IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7921{
7922    /* Decrement the stack pointer. */
7923 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7924 RTUINT64U NewRsp = *pTmpRsp;
7925 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7926
7927    /* Write the qword the lazy way. */
7928 uint64_t *pu64Dst;
7929 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7930 if (rc == VINF_SUCCESS)
7931 {
7932 *pu64Dst = u64Value;
7933 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7934 }
7935
7936    /* Commit the new RSP value unless an access handler made trouble. */
7937 if (rc == VINF_SUCCESS)
7938 *pTmpRsp = NewRsp;
7939
7940 return rc;
7941}
7942
7943
7944/**
7945 * Pops a word from the stack, using a temporary stack pointer.
7946 *
7947 * @returns Strict VBox status code.
7948 * @param pIemCpu The IEM per CPU data.
7949 * @param pu16Value Where to store the popped value.
7950 * @param pTmpRsp Pointer to the temporary stack pointer.
7951 */
7952IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7953{
7954 /* Increment the stack pointer. */
7955 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7956 RTUINT64U NewRsp = *pTmpRsp;
7957 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7958
7959    /* Fetch the word the lazy way. */
7960 uint16_t const *pu16Src;
7961 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7962 if (rc == VINF_SUCCESS)
7963 {
7964 *pu16Value = *pu16Src;
7965 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7966
7967 /* Commit the new RSP value. */
7968 if (rc == VINF_SUCCESS)
7969 *pTmpRsp = NewRsp;
7970 }
7971
7972 return rc;
7973}
7974
7975
7976/**
7977 * Pops a dword from the stack, using a temporary stack pointer.
7978 *
7979 * @returns Strict VBox status code.
7980 * @param pIemCpu The IEM per CPU data.
7981 * @param pu32Value Where to store the popped value.
7982 * @param pTmpRsp Pointer to the temporary stack pointer.
7983 */
7984IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7985{
7986 /* Increment the stack pointer. */
7987 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7988 RTUINT64U NewRsp = *pTmpRsp;
7989 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7990
7991    /* Fetch the dword the lazy way. */
7992 uint32_t const *pu32Src;
7993 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7994 if (rc == VINF_SUCCESS)
7995 {
7996 *pu32Value = *pu32Src;
7997 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7998
7999 /* Commit the new RSP value. */
8000 if (rc == VINF_SUCCESS)
8001 *pTmpRsp = NewRsp;
8002 }
8003
8004 return rc;
8005}
8006
8007
8008/**
8009 * Pops a qword from the stack, using a temporary stack pointer.
8010 *
8011 * @returns Strict VBox status code.
8012 * @param pIemCpu The IEM per CPU data.
8013 * @param pu64Value Where to store the popped value.
8014 * @param pTmpRsp Pointer to the temporary stack pointer.
8015 */
8016IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
8017{
8018 /* Increment the stack pointer. */
8019 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8020 RTUINT64U NewRsp = *pTmpRsp;
8021 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8022
8023    /* Fetch the qword the lazy way. */
8024 uint64_t const *pu64Src;
8025 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8026 if (rcStrict == VINF_SUCCESS)
8027 {
8028 *pu64Value = *pu64Src;
8029 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8030
8031 /* Commit the new RSP value. */
8032 if (rcStrict == VINF_SUCCESS)
8033 *pTmpRsp = NewRsp;
8034 }
8035
8036 return rcStrict;
8037}
8038
8039
8040/**
8041 * Begin a special stack push (used by interrupts, exceptions and such).
8042 *
8043 * This will raise \#SS or \#PF if appropriate.
8044 *
8045 * @returns Strict VBox status code.
8046 * @param pIemCpu The IEM per CPU data.
8047 * @param cbMem The number of bytes to push onto the stack.
8048 * @param ppvMem Where to return the pointer to the stack memory.
8049 * As with the other memory functions this could be
8050 * direct access or bounce buffered access, so
8051 * don't commit the register until the commit call
8052 * succeeds.
8053 * @param puNewRsp Where to return the new RSP value. This must be
8054 * passed unchanged to
8055 * iemMemStackPushCommitSpecial().
8056 */
8057IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8058{
8059 Assert(cbMem < UINT8_MAX);
8060 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8061 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8062 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8063}
8064
8065
8066/**
8067 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8068 *
8069 * This will update the rSP.
8070 *
8071 * @returns Strict VBox status code.
8072 * @param pIemCpu The IEM per CPU data.
8073 * @param pvMem The pointer returned by
8074 * iemMemStackPushBeginSpecial().
8075 * @param uNewRsp The new RSP value returned by
8076 * iemMemStackPushBeginSpecial().
8077 */
8078IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8079{
8080 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8081 if (rcStrict == VINF_SUCCESS)
8082 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8083 return rcStrict;
8084}
8085
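/*
 * Illustrative sketch (not code from this file): a typical caller, such as a
 * real-mode exception dispatcher, pairs the special push helpers above like
 * this.  The 6-byte frame layout and the variable names are assumptions made
 * for the example only.
 *
 *     uint16_t    *pu16Frame;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pu16Frame[2] = (uint16_t)pCtx->eflags.u;    // flags
 *     pu16Frame[1] = pCtx->cs.Sel;                // return CS
 *     pu16Frame[0] = pCtx->ip;                    // return IP
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);  // commits RSP on success
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */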
8086
8087/**
8088 * Begin a special stack pop (used by iret, retf and such).
8089 *
8090 * This will raise \#SS or \#PF if appropriate.
8091 *
8092 * @returns Strict VBox status code.
8093 * @param pIemCpu The IEM per CPU data.
8094 * @param cbMem The number of bytes to pop from the stack.
8095 * @param ppvMem Where to return the pointer to the stack memory.
8096 * @param puNewRsp Where to return the new RSP value. This must be
8097 * passed unchanged to
8098 * iemMemStackPopCommitSpecial() or applied
8099 * manually if iemMemStackPopDoneSpecial() is used.
8100 */
8101IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8102{
8103 Assert(cbMem < UINT8_MAX);
8104 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8105 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8106 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8107}
8108
8109
8110/**
8111 * Continue a special stack pop (used by iret and retf).
8112 *
8113 * This will raise \#SS or \#PF if appropriate.
8114 *
8115 * @returns Strict VBox status code.
8116 * @param pIemCpu The IEM per CPU data.
8117 * @param cbMem The number of bytes to pop from the stack.
8118 * @param ppvMem Where to return the pointer to the stack memory.
8119 * @param puNewRsp Where to return the new RSP value. This must be
8120 * passed unchanged to
8121 * iemMemStackPopCommitSpecial() or applied
8122 * manually if iemMemStackPopDoneSpecial() is used.
8123 */
8124IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8125{
8126 Assert(cbMem < UINT8_MAX);
8127 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8128 RTUINT64U NewRsp;
8129 NewRsp.u = *puNewRsp;
8130 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8131 *puNewRsp = NewRsp.u;
8132 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8133}
8134
8135
8136/**
8137 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8138 *
8139 * This will update the rSP.
8140 *
8141 * @returns Strict VBox status code.
8142 * @param pIemCpu The IEM per CPU data.
8143 * @param pvMem The pointer returned by
8144 * iemMemStackPopBeginSpecial().
8145 * @param uNewRsp The new RSP value returned by
8146 * iemMemStackPopBeginSpecial().
8147 */
8148IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8149{
8150 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8151 if (rcStrict == VINF_SUCCESS)
8152 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8153 return rcStrict;
8154}
8155
8156
8157/**
8158 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8159 * iemMemStackPopContinueSpecial).
8160 *
8161 * The caller will manually commit the rSP.
8162 *
8163 * @returns Strict VBox status code.
8164 * @param pIemCpu The IEM per CPU data.
8165 * @param pvMem The pointer returned by
8166 * iemMemStackPopBeginSpecial() or
8167 * iemMemStackPopContinueSpecial().
8168 */
8169IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8170{
8171 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8172}
8173
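/*
 * Illustrative sketch (not code from this file): a real-mode iret/retf style
 * caller would pair the special pop helpers above roughly like this.  The
 * 6-byte frame and the variable names are assumptions made for the example only.
 *
 *     uint16_t const *pu16Frame;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, (void const **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint16_t const uNewIp    = pu16Frame[0];
 *     uint16_t const uNewCs    = pu16Frame[1];
 *     uint16_t const uNewFlags = pu16Frame[2];
 *     rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);  // or iemMemStackPopDoneSpecial()
 *     if (rcStrict != VINF_SUCCESS)                                         // and assign RSP manually.
 *         return rcStrict;
 */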
8174
8175/**
8176 * Fetches a system table byte.
8177 *
8178 * @returns Strict VBox status code.
8179 * @param pIemCpu The IEM per CPU data.
8180 * @param pbDst Where to return the byte.
8181 * @param iSegReg The index of the segment register to use for
8182 * this access. The base and limits are checked.
8183 * @param GCPtrMem The address of the guest memory.
8184 */
8185IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8186{
8187 /* The lazy approach for now... */
8188 uint8_t const *pbSrc;
8189 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8190 if (rc == VINF_SUCCESS)
8191 {
8192 *pbDst = *pbSrc;
8193 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8194 }
8195 return rc;
8196}
8197
8198
8199/**
8200 * Fetches a system table word.
8201 *
8202 * @returns Strict VBox status code.
8203 * @param pIemCpu The IEM per CPU data.
8204 * @param pu16Dst Where to return the word.
8205 * @param iSegReg The index of the segment register to use for
8206 * this access. The base and limits are checked.
8207 * @param GCPtrMem The address of the guest memory.
8208 */
8209IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8210{
8211 /* The lazy approach for now... */
8212 uint16_t const *pu16Src;
8213 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8214 if (rc == VINF_SUCCESS)
8215 {
8216 *pu16Dst = *pu16Src;
8217 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8218 }
8219 return rc;
8220}
8221
8222
8223/**
8224 * Fetches a system table dword.
8225 *
8226 * @returns Strict VBox status code.
8227 * @param pIemCpu The IEM per CPU data.
8228 * @param pu32Dst Where to return the dword.
8229 * @param iSegReg The index of the segment register to use for
8230 * this access. The base and limits are checked.
8231 * @param GCPtrMem The address of the guest memory.
8232 */
8233IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8234{
8235 /* The lazy approach for now... */
8236 uint32_t const *pu32Src;
8237 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8238 if (rc == VINF_SUCCESS)
8239 {
8240 *pu32Dst = *pu32Src;
8241 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8242 }
8243 return rc;
8244}
8245
8246
8247/**
8248 * Fetches a system table qword.
8249 *
8250 * @returns Strict VBox status code.
8251 * @param pIemCpu The IEM per CPU data.
8252 * @param pu64Dst Where to return the qword.
8253 * @param iSegReg The index of the segment register to use for
8254 * this access. The base and limits are checked.
8255 * @param GCPtrMem The address of the guest memory.
8256 */
8257IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8258{
8259 /* The lazy approach for now... */
8260 uint64_t const *pu64Src;
8261 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8262 if (rc == VINF_SUCCESS)
8263 {
8264 *pu64Dst = *pu64Src;
8265 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8266 }
8267 return rc;
8268}
8269
8270
8271/**
8272 * Fetches a descriptor table entry with caller specified error code.
8273 *
8274 * @returns Strict VBox status code.
8275 * @param pIemCpu The IEM per CPU.
8276 * @param pDesc Where to return the descriptor table entry.
8277 * @param uSel The selector which table entry to fetch.
8278 * @param uXcpt The exception to raise on table lookup error.
8279 * @param uErrorCode The error code associated with the exception.
8280 */
8281IEM_STATIC VBOXSTRICTRC
8282iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8283{
8284 AssertPtr(pDesc);
8285 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8286
8287 /** @todo did the 286 require all 8 bytes to be accessible? */
8288 /*
8289 * Get the selector table base and check bounds.
8290 */
8291 RTGCPTR GCPtrBase;
8292 if (uSel & X86_SEL_LDT)
8293 {
8294 if ( !pCtx->ldtr.Attr.n.u1Present
8295 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8296 {
8297 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8298 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8299 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8300 uErrorCode, 0);
8301 }
8302
8303 Assert(pCtx->ldtr.Attr.n.u1Present);
8304 GCPtrBase = pCtx->ldtr.u64Base;
8305 }
8306 else
8307 {
8308 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8309 {
8310 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8311 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8312 uErrorCode, 0);
8313 }
8314 GCPtrBase = pCtx->gdtr.pGdt;
8315 }
8316
8317 /*
8318 * Read the legacy descriptor and maybe the long mode extensions if
8319 * required.
8320 */
8321 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8322 if (rcStrict == VINF_SUCCESS)
8323 {
8324 if ( !IEM_IS_LONG_MODE(pIemCpu)
8325 || pDesc->Legacy.Gen.u1DescType)
8326 pDesc->Long.au64[1] = 0;
8327 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8328 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8329 else
8330 {
8331 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8332 /** @todo is this the right exception? */
8333 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8334 }
8335 }
8336 return rcStrict;
8337}
8338
8339
8340/**
8341 * Fetches a descriptor table entry.
8342 *
8343 * @returns Strict VBox status code.
8344 * @param pIemCpu The IEM per CPU.
8345 * @param pDesc Where to return the descriptor table entry.
8346 * @param uSel The selector which table entry to fetch.
8347 * @param uXcpt The exception to raise on table lookup error.
8348 */
8349IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8350{
8351 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8352}
8353
8354
8355/**
8356 * Fakes a long mode stack selector for SS = 0.
8357 *
8358 * @param pDescSs Where to return the fake stack descriptor.
8359 * @param uDpl The DPL we want.
8360 */
8361IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8362{
8363 pDescSs->Long.au64[0] = 0;
8364 pDescSs->Long.au64[1] = 0;
8365 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8366 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8367 pDescSs->Long.Gen.u2Dpl = uDpl;
8368 pDescSs->Long.Gen.u1Present = 1;
8369 pDescSs->Long.Gen.u1Long = 1;
8370}
8371
8372
8373/**
8374 * Marks the selector descriptor as accessed (only non-system descriptors).
8375 *
8376 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8377 * will therefore skip the limit checks.
8378 *
8379 * @returns Strict VBox status code.
8380 * @param pIemCpu The IEM per CPU.
8381 * @param uSel The selector.
8382 */
8383IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8384{
8385 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8386
8387 /*
8388 * Get the selector table base and calculate the entry address.
8389 */
8390 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8391 ? pCtx->ldtr.u64Base
8392 : pCtx->gdtr.pGdt;
8393 GCPtr += uSel & X86_SEL_MASK;
8394
8395 /*
8396 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8397 * ugly stuff to avoid this. This will make sure it's an atomic access
8398 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8399 */
8400 VBOXSTRICTRC rcStrict;
8401 uint32_t volatile *pu32;
8402 if ((GCPtr & 3) == 0)
8403 {
8404 /* The normal case, map the 32 bits around the accessed bit (40). */
8405 GCPtr += 2 + 2;
8406 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8407 if (rcStrict != VINF_SUCCESS)
8408 return rcStrict;
8409 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8410 }
8411 else
8412 {
8413 /* The misaligned GDT/LDT case, map the whole thing. */
8414 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8415 if (rcStrict != VINF_SUCCESS)
8416 return rcStrict;
8417 switch ((uintptr_t)pu32 & 3)
8418 {
8419 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8420 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8421 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8422 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8423 }
8424 }
8425
8426 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8427}
8428
8429/** @} */
8430
8431
8432/*
8433 * Include the C/C++ implementation of instruction.
8434 */
8435#include "IEMAllCImpl.cpp.h"
8436
8437
8438
8439/** @name "Microcode" macros.
8440 *
8441 * The idea is that we should be able to use the same code to interpret
8442 * instructions as well as recompiler instructions. Thus this obfuscation.
8443 *
8444 * @{
8445 */
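/*
 * Illustrative sketch (not an actual opcode handler from this file): the
 * register form of a 32-bit binary ALU operation could be expressed with
 * these microcode macros roughly as follows.  The worker function name and
 * the register index variables are placeholders for the example.
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *     IEM_MC_ARG(uint32_t,   u32Src,  1);
 *     IEM_MC_ARG(uint32_t *, pEFlags, 2);
 *     IEM_MC_FETCH_GREG_U32(u32Src, iGRegSrc);
 *     IEM_MC_REF_GREG_U32(pu32Dst, iGRegDst);
 *     IEM_MC_REF_EFLAGS(pEFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(pfnAluWorkerU32, pu32Dst, u32Src, pEFlags);
 *     IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */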
8446#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8447#define IEM_MC_END() }
8448#define IEM_MC_PAUSE() do {} while (0)
8449#define IEM_MC_CONTINUE() do {} while (0)
8450
8451/** Internal macro. */
8452#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8453 do \
8454 { \
8455 VBOXSTRICTRC rcStrict2 = a_Expr; \
8456 if (rcStrict2 != VINF_SUCCESS) \
8457 return rcStrict2; \
8458 } while (0)
8459
8460#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8461#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8462#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8463#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8464#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8465#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8466#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8467
8468#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8469#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8470 do { \
8471 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8472 return iemRaiseDeviceNotAvailable(pIemCpu); \
8473 } while (0)
8474#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8475 do { \
8476 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8477 return iemRaiseMathFault(pIemCpu); \
8478 } while (0)
8479#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8480 do { \
8481 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8482 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8483 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8484 return iemRaiseUndefinedOpcode(pIemCpu); \
8485 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8486 return iemRaiseDeviceNotAvailable(pIemCpu); \
8487 } while (0)
8488#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8489 do { \
8490 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8491 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8492 return iemRaiseUndefinedOpcode(pIemCpu); \
8493 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8494 return iemRaiseDeviceNotAvailable(pIemCpu); \
8495 } while (0)
8496#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8497 do { \
8498 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8499 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8500 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8501 return iemRaiseUndefinedOpcode(pIemCpu); \
8502 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8503 return iemRaiseDeviceNotAvailable(pIemCpu); \
8504 } while (0)
8505#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8506 do { \
8507 if (pIemCpu->uCpl != 0) \
8508 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8509 } while (0)
8510
8511
8512#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8513#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8514#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8515#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8516#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8517#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8518#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8519 uint32_t a_Name; \
8520 uint32_t *a_pName = &a_Name
8521#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8522 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8523
8524#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8525#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8526
8527#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8528#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8529#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8530#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8531#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8532#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8533#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8534#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8535#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8536#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8537#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8538#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8539#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8540#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8541#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8542#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8543#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8544#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8545#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8546#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8547#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8548#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8549#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8550#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8551#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8552#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8553#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8554#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8555#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8556/** @note Not for IOPL or IF testing or modification. */
8557#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8558#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8559#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8560#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8561
8562#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8563#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8564#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8565#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8566#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8567#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8568#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8569#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8570#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8571#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8572#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8573 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8574
8575#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8576#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8577/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8578 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8579#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8580#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8581/** @note Not for IOPL or IF testing or modification. */
8582#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8583
8584#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8585#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8586#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8587 do { \
8588 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8589 *pu32Reg += (a_u32Value); \
8590 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8591 } while (0)
8592#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8593
8594#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8595#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8596#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8597 do { \
8598 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8599 *pu32Reg -= (a_u32Value); \
8600 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8601 } while (0)
8602#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8603#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
8604
8605#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8606#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8607#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8608#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8609#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8610#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8611#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8612
8613#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8614#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8615#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8616#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8617
8618#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8619#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8620#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8621
8622#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8623#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
8624#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8625
8626#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8627#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8628#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8629
8630#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8631#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8632#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8633
8634#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8635
8636#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8637
8638#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8639#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8640#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8641 do { \
8642 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8643 *pu32Reg &= (a_u32Value); \
8644 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8645 } while (0)
8646#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8647
8648#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8649#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8650#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8651 do { \
8652 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8653 *pu32Reg |= (a_u32Value); \
8654 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8655 } while (0)
8656#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8657
8658
8659/** @note Not for IOPL or IF modification. */
8660#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8661/** @note Not for IOPL or IF modification. */
8662#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8663/** @note Not for IOPL or IF modification. */
8664#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8665
8666#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8667
8668
8669#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8670 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8671#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8672 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8673#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8674 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8675#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8676 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8677#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8678 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8679#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8680 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8681#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8682 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8683
8684#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8685 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8686#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8687 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8688#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8689 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8690#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8691 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8692#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8693 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8694 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8695 } while (0)
8696#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8697 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8698 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8699 } while (0)
8700#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8701 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8702#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8703 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8704#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8705 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8706
8707#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8709#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8710 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8711#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8712 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8713
8714#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8716#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8718#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8720
8721#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8723#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8725#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8727
8728#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8730
8731#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8732 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8733#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8734 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8735#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8737#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8739
8740#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8742#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8744#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8746
8747#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8749#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8751
8752
8753
8754#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8755 do { \
8756 uint8_t u8Tmp; \
8757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8758 (a_u16Dst) = u8Tmp; \
8759 } while (0)
8760#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8761 do { \
8762 uint8_t u8Tmp; \
8763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8764 (a_u32Dst) = u8Tmp; \
8765 } while (0)
8766#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8767 do { \
8768 uint8_t u8Tmp; \
8769 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8770 (a_u64Dst) = u8Tmp; \
8771 } while (0)
8772#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8773 do { \
8774 uint16_t u16Tmp; \
8775 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8776 (a_u32Dst) = u16Tmp; \
8777 } while (0)
8778#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8779 do { \
8780 uint16_t u16Tmp; \
8781 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8782 (a_u64Dst) = u16Tmp; \
8783 } while (0)
8784#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8785 do { \
8786 uint32_t u32Tmp; \
8787 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8788 (a_u64Dst) = u32Tmp; \
8789 } while (0)
8790
8791#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8792 do { \
8793 uint8_t u8Tmp; \
8794 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8795 (a_u16Dst) = (int8_t)u8Tmp; \
8796 } while (0)
8797#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8798 do { \
8799 uint8_t u8Tmp; \
8800 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8801 (a_u32Dst) = (int8_t)u8Tmp; \
8802 } while (0)
8803#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8804 do { \
8805 uint8_t u8Tmp; \
8806 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8807 (a_u64Dst) = (int8_t)u8Tmp; \
8808 } while (0)
8809#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8810 do { \
8811 uint16_t u16Tmp; \
8812 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8813 (a_u32Dst) = (int16_t)u16Tmp; \
8814 } while (0)
8815#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8816 do { \
8817 uint16_t u16Tmp; \
8818 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8819 (a_u64Dst) = (int16_t)u16Tmp; \
8820 } while (0)
8821#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8822 do { \
8823 uint32_t u32Tmp; \
8824 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8825 (a_u64Dst) = (int32_t)u32Tmp; \
8826 } while (0)
8827
8828#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8829 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8830#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8831 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8832#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8833 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8834#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8835 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8836
8837#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8838 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8839#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8840 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8841#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8842 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8843#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8844 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8845
8846#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8847#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8848#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8849#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8850#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8851#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8852#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8853 do { \
8854 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8855 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8856 } while (0)
8857
8858#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8859 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8860#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8861 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8862
8863
8864#define IEM_MC_PUSH_U16(a_u16Value) \
8865 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8866#define IEM_MC_PUSH_U32(a_u32Value) \
8867 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8868#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8869 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8870#define IEM_MC_PUSH_U64(a_u64Value) \
8871 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8872
8873#define IEM_MC_POP_U16(a_pu16Value) \
8874 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8875#define IEM_MC_POP_U32(a_pu32Value) \
8876 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8877#define IEM_MC_POP_U64(a_pu64Value) \
8878 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8879
8880/** Maps guest memory for direct or bounce buffered access.
8881 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8882 * @remarks May return.
8883 */
8884#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8885 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8886
8887/** Maps guest memory for direct or bounce buffered access.
8888 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8889 * @remarks May return.
8890 */
8891#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8892 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8893
8894/** Commits the memory and unmaps the guest memory.
8895 * @remarks May return.
8896 */
8897#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8898 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8899
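/*
 * Illustrative sketch (not code from this file): a read-modify-write memory
 * operand is typically mapped, passed to the worker, and then committed with
 * the pair of macros above.  The ModR/M byte, the segment index variable and
 * the worker name are placeholders for the example.
 *
 *     IEM_MC_BEGIN(3, 2);
 *     IEM_MC_ARG(uint16_t *,   pu16Dst,         0);
 *     IEM_MC_ARG(uint16_t,     u16Src,          1);
 *     IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
 *     IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_FETCH_GREG_U16(u16Src, iGRegSrc);
 *     IEM_MC_FETCH_EFLAGS(EFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(pfnAluWorkerU16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *     IEM_MC_COMMIT_EFLAGS(EFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */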
8900/** Commits the memory and unmaps the guest memory unless the FPU status word
8901 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8902 * would cause FLD not to store.
8903 *
8904 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8905 * store, while \#P will not.
8906 *
8907 * @remarks May in theory return - for now.
8908 */
8909#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8910 do { \
8911 if ( !(a_u16FSW & X86_FSW_ES) \
8912 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8913 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8914 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8915 } while (0)
8916
8917/** Calculate effective address from R/M. */
8918#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8919 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8920
8921#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8922#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8923#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8924#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8925#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8926#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8927#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8928
8929/**
8930 * Defers the rest of the instruction emulation to a C implementation routine
8931 * and returns, only taking the standard parameters.
8932 *
8933 * @param a_pfnCImpl The pointer to the C routine.
8934 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8935 */
8936#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8937
8938/**
8939 * Defers the rest of instruction emulation to a C implementation routine and
8940 * returns, taking one argument in addition to the standard ones.
8941 *
8942 * @param a_pfnCImpl The pointer to the C routine.
8943 * @param a0 The argument.
8944 */
8945#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8946
8947/**
8948 * Defers the rest of the instruction emulation to a C implementation routine
8949 * and returns, taking two arguments in addition to the standard ones.
8950 *
8951 * @param a_pfnCImpl The pointer to the C routine.
8952 * @param a0 The first extra argument.
8953 * @param a1 The second extra argument.
8954 */
8955#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8956
8957/**
8958 * Defers the rest of the instruction emulation to a C implementation routine
8959 * and returns, taking three arguments in addition to the standard ones.
8960 *
8961 * @param a_pfnCImpl The pointer to the C routine.
8962 * @param a0 The first extra argument.
8963 * @param a1 The second extra argument.
8964 * @param a2 The third extra argument.
8965 */
8966#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8967
8968/**
8969 * Defers the rest of the instruction emulation to a C implementation routine
8970 * and returns, taking four arguments in addition to the standard ones.
8971 *
8972 * @param a_pfnCImpl The pointer to the C routine.
8973 * @param a0 The first extra argument.
8974 * @param a1 The second extra argument.
8975 * @param a2 The third extra argument.
8976 * @param a3 The fourth extra argument.
8977 */
8978#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8979
8980/**
8981 * Defers the rest of the instruction emulation to a C implementation routine
8982 * and returns, taking five arguments in addition to the standard ones.
8983 *
8984 * @param a_pfnCImpl The pointer to the C routine.
8985 * @param a0 The first extra argument.
8986 * @param a1 The second extra argument.
8987 * @param a2 The third extra argument.
8988 * @param a3 The fourth extra argument.
8989 * @param a4 The fifth extra argument.
8990 */
8991#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8992
8993/**
8994 * Defers the entire instruction emulation to a C implementation routine and
8995 * returns, only taking the standard parameters.
8996 *
8997 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8998 *
8999 * @param a_pfnCImpl The pointer to the C routine.
9000 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
9001 */
9002#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
9003
9004/**
9005 * Defers the entire instruction emulation to a C implementation routine and
9006 * returns, taking one argument in addition to the standard ones.
9007 *
9008 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9009 *
9010 * @param a_pfnCImpl The pointer to the C routine.
9011 * @param a0 The argument.
9012 */
9013#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
9014
9015/**
9016 * Defers the entire instruction emulation to a C implementation routine and
9017 * returns, taking two arguments in addition to the standard ones.
9018 *
9019 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9020 *
9021 * @param a_pfnCImpl The pointer to the C routine.
9022 * @param a0 The first extra argument.
9023 * @param a1 The second extra argument.
9024 */
9025#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
9026
9027/**
9028 * Defers the entire instruction emulation to a C implementation routine and
9029 * returns, taking three arguments in addition to the standard ones.
9030 *
9031 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9032 *
9033 * @param a_pfnCImpl The pointer to the C routine.
9034 * @param a0 The first extra argument.
9035 * @param a1 The second extra argument.
9036 * @param a2 The third extra argument.
9037 */
9038#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9039
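/*
 * Illustrative sketch (not code from this file): an instruction that is
 * emulated entirely in C typically reduces its opcode handler to a single
 * deferral, while one that first decodes operands wraps the C call in a
 * microcode block.  The worker names and the decoded immediate are
 * placeholders for the example.
 *
 *     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeWorker);
 *
 *     IEM_MC_BEGIN(1, 0);
 *     IEM_MC_ARG_CONST(uint16_t, u16Imm, uDecodedImm, 0);
 *     IEM_MC_CALL_CIMPL_1(iemCImpl_SomeOtherWorker, u16Imm);
 *     IEM_MC_END();
 */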
9040/**
9041 * Calls a FPU assembly implementation taking one visible argument.
9042 *
9043 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9044 * @param a0 The first extra argument.
9045 */
9046#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
9047 do { \
9048 iemFpuPrepareUsage(pIemCpu); \
9049 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9050 } while (0)
9051
9052/**
9053 * Calls a FPU assembly implementation taking two visible arguments.
9054 *
9055 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9056 * @param a0 The first extra argument.
9057 * @param a1 The second extra argument.
9058 */
9059#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9060 do { \
9061 iemFpuPrepareUsage(pIemCpu); \
9062 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9063 } while (0)
9064
9065/**
9066 * Calls a FPU assembly implementation taking three visible arguments.
9067 *
9068 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9069 * @param a0 The first extra argument.
9070 * @param a1 The second extra argument.
9071 * @param a2 The third extra argument.
9072 */
9073#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9074 do { \
9075 iemFpuPrepareUsage(pIemCpu); \
9076 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9077 } while (0)
9078
9079#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9080 do { \
9081 (a_FpuData).FSW = (a_FSW); \
9082 (a_FpuData).r80Result = *(a_pr80Value); \
9083 } while (0)
9084
9085/** Pushes FPU result onto the stack. */
9086#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9087 iemFpuPushResult(pIemCpu, &a_FpuData)
9088/** Pushes FPU result onto the stack and sets the FPUDP. */
9089#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9090 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9091
9092/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
9093#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9094 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9095
9096/** Stores FPU result in a stack register. */
9097#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9098 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9099/** Stores FPU result in a stack register and pops the stack. */
9100#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9101 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9102/** Stores FPU result in a stack register and sets the FPUDP. */
9103#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9104 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9105/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9106 * stack. */
9107#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9108 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9109
9110/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9111#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9112 iemFpuUpdateOpcodeAndIp(pIemCpu)
9113/** Free a stack register (for FFREE and FFREEP). */
9114#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9115 iemFpuStackFree(pIemCpu, a_iStReg)
9116/** Increment the FPU stack pointer. */
9117#define IEM_MC_FPU_STACK_INC_TOP() \
9118 iemFpuStackIncTop(pIemCpu)
9119/** Decrement the FPU stack pointer. */
9120#define IEM_MC_FPU_STACK_DEC_TOP() \
9121 iemFpuStackDecTop(pIemCpu)
9122
9123/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9124#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9125 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9126/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9127#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9128 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9129/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9130#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9131 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9132/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9133#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9134 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9135/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9136 * stack. */
9137#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9138 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9139/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9140#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9141 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9142
9143/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9144#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9145 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9146/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9147 * stack. */
9148#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9149 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9150/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9151 * FPUDS. */
9152#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9153 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9154/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9155 * FPUDS. Pops stack. */
9156#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9157 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9158/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9159 * stack twice. */
9160#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9161 iemFpuStackUnderflowThenPopPop(pIemCpu)
9162/** Raises a FPU stack underflow exception for an instruction pushing a result
9163 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9164#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9165 iemFpuStackPushUnderflow(pIemCpu)
9166/** Raises a FPU stack underflow exception for an instruction pushing a result
9167 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9168#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9169 iemFpuStackPushUnderflowTwo(pIemCpu)
9170
9171/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9172 * FPUIP, FPUCS and FOP. */
9173#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9174 iemFpuStackPushOverflow(pIemCpu)
9175/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9176 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9177#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9178 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9179/** Indicates that we (might) have modified the FPU state. */
9180#define IEM_MC_USED_FPU() \
9181 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9182
9183/**
9184 * Calls a MMX assembly implementation taking two visible arguments.
9185 *
9186 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9187 * @param a0 The first extra argument.
9188 * @param a1 The second extra argument.
9189 */
9190#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9191 do { \
9192 iemFpuPrepareUsage(pIemCpu); \
9193 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9194 } while (0)
9195
9196/**
9197 * Calls a MMX assembly implementation taking three visible arguments.
9198 *
9199 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9200 * @param a0 The first extra argument.
9201 * @param a1 The second extra argument.
9202 * @param a2 The third extra argument.
9203 */
9204#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9205 do { \
9206 iemFpuPrepareUsage(pIemCpu); \
9207 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9208 } while (0)
9209
9210
9211/**
9212 * Calls a SSE assembly implementation taking two visible arguments.
9213 *
9214 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9215 * @param a0 The first extra argument.
9216 * @param a1 The second extra argument.
9217 */
9218#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9219 do { \
9220 iemFpuPrepareUsageSse(pIemCpu); \
9221 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9222 } while (0)
9223
9224/**
9225 * Calls a SSE assembly implementation taking three visible arguments.
9226 *
9227 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9228 * @param a0 The first extra argument.
9229 * @param a1 The second extra argument.
9230 * @param a2 The third extra argument.
9231 */
9232#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9233 do { \
9234 iemFpuPrepareUsageSse(pIemCpu); \
9235 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9236 } while (0)
9237
9238
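/*
 * Illustrative sketch (worker and argument names assumed, not taken from the
 * real instruction tables): a two-operand MMX instruction would typically be
 * wired up along these lines:
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint64_t *,          pDst, 0);
 *      IEM_MC_ARG(uint64_t const *,    pSrc, 1);
 *      ... obtain the destination and source register references ...
 *      IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_pxor_u64, pDst, pSrc);
 *      IEM_MC_END();
 *
 * Both macro families call iemFpuPrepareUsage / iemFpuPrepareUsageSse first so
 * the worker always operates on an up-to-date x87/XMM state block.
 */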
9239/** @note Not for IOPL or IF testing. */
9240#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9241/** @note Not for IOPL or IF testing. */
9242#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9243/** @note Not for IOPL or IF testing. */
9244#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9245/** @note Not for IOPL or IF testing. */
9246#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9247/** @note Not for IOPL or IF testing. */
9248#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9249 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9250 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9251/** @note Not for IOPL or IF testing. */
9252#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9253 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9254 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9255/** @note Not for IOPL or IF testing. */
9256#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9257 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9258 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9259 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9260/** @note Not for IOPL or IF testing. */
9261#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9262 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9263 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9264 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9265#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9266#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9267#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9268/** @note Not for IOPL or IF testing. */
9269#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9270 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9271 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9272/** @note Not for IOPL or IF testing. */
9273#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9274 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9275 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9276/** @note Not for IOPL or IF testing. */
9277#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9278 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9279 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9280/** @note Not for IOPL or IF testing. */
9281#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9282 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9283 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9284/** @note Not for IOPL or IF testing. */
9285#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9286 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9287 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9288/** @note Not for IOPL or IF testing. */
9289#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9290 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9291 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9292#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9293#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9294#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9295 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9296#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9297 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9298#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9299 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9300#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9301 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9302#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9303 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9304#define IEM_MC_IF_FCW_IM() \
9305 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9306
9307#define IEM_MC_ELSE() } else {
9308#define IEM_MC_ENDIF() } do {} while (0)
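/*
 * How the conditional macros pair up (expansion shown for illustration, using
 * the standard X86_EFL_ZF constant):
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)   =>  if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (X86_EFL_ZF)) {
 *      IEM_MC_ELSE()                       =>  } else {
 *      IEM_MC_ENDIF()                      =>  } do {} while (0)
 *
 * The trailing do/while in IEM_MC_ENDIF merely swallows the semicolon the
 * instruction tables write after it.
 */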
9309
9310/** @} */
9311
9312
9313/** @name Opcode Debug Helpers.
9314 * @{
9315 */
9316#ifdef DEBUG
9317# define IEMOP_MNEMONIC(a_szMnemonic) \
9318 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9319 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9320# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9321 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9322 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9323#else
9324# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9325# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9326#endif
9327
9328/** @} */
9329
9330
9331/** @name Opcode Helpers.
9332 * @{
9333 */
9334
9335#ifdef IN_RING3
9336# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9337 do { \
9338 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9339 else \
9340 { \
9341 DBGFSTOP(IEMCPU_TO_VM(pIemCpu)); \
9342 return IEMOP_RAISE_INVALID_OPCODE(); \
9343 } \
9344 } while (0)
9345#else
9346# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9347 do { \
9348 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9349 else return IEMOP_RAISE_INVALID_OPCODE(); \
9350 } while (0)
9351#endif
9352
9353/** The instruction requires a 186 or later. */
9354#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
9355# define IEMOP_HLP_MIN_186() do { } while (0)
9356#else
9357# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
9358#endif
9359
9360/** The instruction requires a 286 or later. */
9361#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
9362# define IEMOP_HLP_MIN_286() do { } while (0)
9363#else
9364# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
9365#endif
9366
9367/** The instruction requires a 386 or later. */
9368#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9369# define IEMOP_HLP_MIN_386() do { } while (0)
9370#else
9371# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
9372#endif
9373
9374/** The instruction requires a 386 or later if the given expression is true. */
9375#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9376# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
9377#else
9378# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
9379#endif
9380
9381/** The instruction requires a 486 or later. */
9382#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
9383# define IEMOP_HLP_MIN_486() do { } while (0)
9384#else
9385# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
9386#endif
9387
9388/** The instruction requires a Pentium (586) or later. */
9389#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
9390# define IEMOP_HLP_MIN_586() do { } while (0)
9391#else
9392# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
9393#endif
9394
9395/** The instruction requires a PentiumPro (686) or later. */
9396#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
9397# define IEMOP_HLP_MIN_686() do { } while (0)
9398#else
9399# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
9400#endif
9401
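/*
 * Usage sketch (hypothetical opcode handler, not an actual table entry): the
 * MIN helpers sit at the very top of a decoder function so that guests
 * configured as older CPUs get \#UD instead of a later-architecture encoding:
 *
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          IEMOP_MNEMONIC("example");
 *          IEMOP_HLP_MIN_486();
 *          ...
 *      }
 *
 * When IEM_CFG_TARGET_CPU is fixed at or above the required level, the helper
 * compiles down to an empty statement and costs nothing at runtime.
 */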
9402
9403/** The instruction raises an \#UD in real and V8086 mode. */
9404#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9405 do \
9406 { \
9407 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9408 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9409 } while (0)
9410
9411/** The instruction allows no lock prefixing (in this encoding); throws \#UD if
9412 * lock prefixed.
9413 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9414#define IEMOP_HLP_NO_LOCK_PREFIX() \
9415 do \
9416 { \
9417 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9418 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9419 } while (0)
9420
9421/** The instruction is not available in 64-bit mode; throws \#UD if we're in
9422 * 64-bit mode. */
9423#define IEMOP_HLP_NO_64BIT() \
9424 do \
9425 { \
9426 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9427 return IEMOP_RAISE_INVALID_OPCODE(); \
9428 } while (0)
9429
9430/** The instruction is only available in 64-bit mode; throws \#UD if we're not in
9431 * 64-bit mode. */
9432#define IEMOP_HLP_ONLY_64BIT() \
9433 do \
9434 { \
9435 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9436 return IEMOP_RAISE_INVALID_OPCODE(); \
9437 } while (0)
9438
9439/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9440#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9441 do \
9442 { \
9443 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9444 iemRecalEffOpSize64Default(pIemCpu); \
9445 } while (0)
9446
9447/** The instruction has 64-bit operand size if 64-bit mode. */
9448#define IEMOP_HLP_64BIT_OP_SIZE() \
9449 do \
9450 { \
9451 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9452 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9453 } while (0)
9454
9455/** Only a REX prefix immediately preceding the first opcode byte takes
9456 * effect. This macro helps ensure this as well as logging bad guest code. */
9457#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9458 do \
9459 { \
9460 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9461 { \
9462 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9463 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9464 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9465 pIemCpu->uRexB = 0; \
9466 pIemCpu->uRexIndex = 0; \
9467 pIemCpu->uRexReg = 0; \
9468 iemRecalEffOpSize(pIemCpu); \
9469 } \
9470 } while (0)
9471
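/*
 * Example of the situation this handles (byte sequence chosen for
 * illustration): a REX prefix only counts when it is the last prefix before
 * the opcode, so in the sequence 48 66 90 the 0x66 operand-size prefix voids
 * the REX.W; the macro above forgets the stale REX state and logs the odd
 * guest code at level 5 before decoding continues.
 */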
9472/**
9473 * Done decoding.
9474 */
9475#define IEMOP_HLP_DONE_DECODING() \
9476 do \
9477 { \
9478 /*nothing for now, maybe later... */ \
9479 } while (0)
9480
9481/**
9482 * Done decoding, raise \#UD exception if lock prefix present.
9483 */
9484#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9485 do \
9486 { \
9487 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9488 { /* likely */ } \
9489 else \
9490 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9491 } while (0)
9492#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9493 do \
9494 { \
9495 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9496 { /* likely */ } \
9497 else \
9498 { \
9499 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9500 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9501 } \
9502 } while (0)
9503#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9504 do \
9505 { \
9506 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9507 { /* likely */ } \
9508 else \
9509 { \
9510 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9511 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9512 } \
9513 } while (0)
9514/**
9515 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9516 * are present.
9517 */
9518#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9519 do \
9520 { \
9521 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9522 { /* likely */ } \
9523 else \
9524 return IEMOP_RAISE_INVALID_OPCODE(); \
9525 } while (0)
9526
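/*
 * Ordering sketch (handler body abbreviated, names illustrative): the
 * done-decoding helpers are placed after all opcode, ModR/M and immediate
 * bytes have been fetched and before the MC block performs any side effects:
 *
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      ...
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_BEGIN(0, 1);
 *      ...
 *
 * so a stray lock/repz/repnz prefix only raises \#UD once the instruction has
 * been fully decoded.
 */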
9527
9528/**
9529 * Calculates the effective address of a ModR/M memory operand.
9530 *
9531 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9532 *
9533 * @return Strict VBox status code.
9534 * @param pIemCpu The IEM per CPU data.
9535 * @param bRm The ModRM byte.
9536 * @param cbImm The size of any immediate following the
9537 * effective address opcode bytes. Important for
9538 * RIP relative addressing.
9539 * @param pGCPtrEff Where to return the effective address.
9540 */
9541IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9542{
9543 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9544 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9545#define SET_SS_DEF() \
9546 do \
9547 { \
9548 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9549 pIemCpu->iEffSeg = X86_SREG_SS; \
9550 } while (0)
9551
9552 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9553 {
9554/** @todo Check the effective address size crap! */
9555 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9556 {
9557 uint16_t u16EffAddr;
9558
9559 /* Handle the disp16 form with no registers first. */
9560 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9561 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9562 else
9563 {
9564                /* Get the displacement. */
9565 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9566 {
9567 case 0: u16EffAddr = 0; break;
9568 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9569 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9570 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9571 }
9572
9573 /* Add the base and index registers to the disp. */
9574 switch (bRm & X86_MODRM_RM_MASK)
9575 {
9576 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9577 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9578 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9579 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9580 case 4: u16EffAddr += pCtx->si; break;
9581 case 5: u16EffAddr += pCtx->di; break;
9582 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9583 case 7: u16EffAddr += pCtx->bx; break;
9584 }
9585 }
9586
9587 *pGCPtrEff = u16EffAddr;
9588 }
9589 else
9590 {
9591 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9592 uint32_t u32EffAddr;
9593
9594 /* Handle the disp32 form with no registers first. */
9595 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9596 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9597 else
9598 {
9599 /* Get the register (or SIB) value. */
9600 switch ((bRm & X86_MODRM_RM_MASK))
9601 {
9602 case 0: u32EffAddr = pCtx->eax; break;
9603 case 1: u32EffAddr = pCtx->ecx; break;
9604 case 2: u32EffAddr = pCtx->edx; break;
9605 case 3: u32EffAddr = pCtx->ebx; break;
9606 case 4: /* SIB */
9607 {
9608 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9609
9610 /* Get the index and scale it. */
9611 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9612 {
9613 case 0: u32EffAddr = pCtx->eax; break;
9614 case 1: u32EffAddr = pCtx->ecx; break;
9615 case 2: u32EffAddr = pCtx->edx; break;
9616 case 3: u32EffAddr = pCtx->ebx; break;
9617 case 4: u32EffAddr = 0; /*none */ break;
9618 case 5: u32EffAddr = pCtx->ebp; break;
9619 case 6: u32EffAddr = pCtx->esi; break;
9620 case 7: u32EffAddr = pCtx->edi; break;
9621 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9622 }
9623 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9624
9625 /* add base */
9626 switch (bSib & X86_SIB_BASE_MASK)
9627 {
9628 case 0: u32EffAddr += pCtx->eax; break;
9629 case 1: u32EffAddr += pCtx->ecx; break;
9630 case 2: u32EffAddr += pCtx->edx; break;
9631 case 3: u32EffAddr += pCtx->ebx; break;
9632 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9633 case 5:
9634 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9635 {
9636 u32EffAddr += pCtx->ebp;
9637 SET_SS_DEF();
9638 }
9639 else
9640 {
9641 uint32_t u32Disp;
9642 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9643 u32EffAddr += u32Disp;
9644 }
9645 break;
9646 case 6: u32EffAddr += pCtx->esi; break;
9647 case 7: u32EffAddr += pCtx->edi; break;
9648 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9649 }
9650 break;
9651 }
9652 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9653 case 6: u32EffAddr = pCtx->esi; break;
9654 case 7: u32EffAddr = pCtx->edi; break;
9655 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9656 }
9657
9658 /* Get and add the displacement. */
9659 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9660 {
9661 case 0:
9662 break;
9663 case 1:
9664 {
9665 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9666 u32EffAddr += i8Disp;
9667 break;
9668 }
9669 case 2:
9670 {
9671 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9672 u32EffAddr += u32Disp;
9673 break;
9674 }
9675 default:
9676 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9677 }
9678
9679 }
9680 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9681 *pGCPtrEff = u32EffAddr;
9682 else
9683 {
9684 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9685 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9686 }
9687 }
9688 }
9689 else
9690 {
9691 uint64_t u64EffAddr;
9692
9693 /* Handle the rip+disp32 form with no registers first. */
9694 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9695 {
9696 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9697 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9698 }
9699 else
9700 {
9701 /* Get the register (or SIB) value. */
9702 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9703 {
9704 case 0: u64EffAddr = pCtx->rax; break;
9705 case 1: u64EffAddr = pCtx->rcx; break;
9706 case 2: u64EffAddr = pCtx->rdx; break;
9707 case 3: u64EffAddr = pCtx->rbx; break;
9708 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9709 case 6: u64EffAddr = pCtx->rsi; break;
9710 case 7: u64EffAddr = pCtx->rdi; break;
9711 case 8: u64EffAddr = pCtx->r8; break;
9712 case 9: u64EffAddr = pCtx->r9; break;
9713 case 10: u64EffAddr = pCtx->r10; break;
9714 case 11: u64EffAddr = pCtx->r11; break;
9715 case 13: u64EffAddr = pCtx->r13; break;
9716 case 14: u64EffAddr = pCtx->r14; break;
9717 case 15: u64EffAddr = pCtx->r15; break;
9718 /* SIB */
9719 case 4:
9720 case 12:
9721 {
9722 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9723
9724 /* Get the index and scale it. */
9725 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9726 {
9727 case 0: u64EffAddr = pCtx->rax; break;
9728 case 1: u64EffAddr = pCtx->rcx; break;
9729 case 2: u64EffAddr = pCtx->rdx; break;
9730 case 3: u64EffAddr = pCtx->rbx; break;
9731 case 4: u64EffAddr = 0; /*none */ break;
9732 case 5: u64EffAddr = pCtx->rbp; break;
9733 case 6: u64EffAddr = pCtx->rsi; break;
9734 case 7: u64EffAddr = pCtx->rdi; break;
9735 case 8: u64EffAddr = pCtx->r8; break;
9736 case 9: u64EffAddr = pCtx->r9; break;
9737 case 10: u64EffAddr = pCtx->r10; break;
9738 case 11: u64EffAddr = pCtx->r11; break;
9739 case 12: u64EffAddr = pCtx->r12; break;
9740 case 13: u64EffAddr = pCtx->r13; break;
9741 case 14: u64EffAddr = pCtx->r14; break;
9742 case 15: u64EffAddr = pCtx->r15; break;
9743 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9744 }
9745 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9746
9747 /* add base */
9748 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9749 {
9750 case 0: u64EffAddr += pCtx->rax; break;
9751 case 1: u64EffAddr += pCtx->rcx; break;
9752 case 2: u64EffAddr += pCtx->rdx; break;
9753 case 3: u64EffAddr += pCtx->rbx; break;
9754 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9755 case 6: u64EffAddr += pCtx->rsi; break;
9756 case 7: u64EffAddr += pCtx->rdi; break;
9757 case 8: u64EffAddr += pCtx->r8; break;
9758 case 9: u64EffAddr += pCtx->r9; break;
9759 case 10: u64EffAddr += pCtx->r10; break;
9760 case 11: u64EffAddr += pCtx->r11; break;
9761 case 12: u64EffAddr += pCtx->r12; break;
9762 case 14: u64EffAddr += pCtx->r14; break;
9763 case 15: u64EffAddr += pCtx->r15; break;
9764 /* complicated encodings */
9765 case 5:
9766 case 13:
9767 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9768 {
9769 if (!pIemCpu->uRexB)
9770 {
9771 u64EffAddr += pCtx->rbp;
9772 SET_SS_DEF();
9773 }
9774 else
9775 u64EffAddr += pCtx->r13;
9776 }
9777 else
9778 {
9779 uint32_t u32Disp;
9780 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9781 u64EffAddr += (int32_t)u32Disp;
9782 }
9783 break;
9784 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9785 }
9786 break;
9787 }
9788 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9789 }
9790
9791 /* Get and add the displacement. */
9792 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9793 {
9794 case 0:
9795 break;
9796 case 1:
9797 {
9798 int8_t i8Disp;
9799 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9800 u64EffAddr += i8Disp;
9801 break;
9802 }
9803 case 2:
9804 {
9805 uint32_t u32Disp;
9806 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9807 u64EffAddr += (int32_t)u32Disp;
9808 break;
9809 }
9810 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9811 }
9812
9813 }
9814
9815 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9816 *pGCPtrEff = u64EffAddr;
9817 else
9818 {
9819 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9820 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9821 }
9822 }
9823
9824 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9825 return VINF_SUCCESS;
9826}
9827
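/*
 * Worked example (register values invented for illustration): with a 16-bit
 * effective address size, bRm=0x42 decodes as mod=01, r/m=010, i.e.
 * [bp+si+disp8].  With bp=0x1000, si=0x0020 and a disp8 of 0x08 the function
 * stores 0x1000 + 0x0020 + 0x08 = 0x1028 in *pGCPtrEff, and SET_SS_DEF()
 * makes SS the default segment because BP participates in the address.
 */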
9828/** @} */
9829
9830
9831
9832/*
9833 * Include the instructions
9834 */
9835#include "IEMAllInstructions.cpp.h"
9836
9837
9838
9839
9840#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9841
9842/**
9843 * Sets up execution verification mode.
9844 */
9845IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9846{
9847 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9848 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9849
9850 /*
9851 * Always note down the address of the current instruction.
9852 */
9853 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9854 pIemCpu->uOldRip = pOrgCtx->rip;
9855
9856 /*
9857 * Enable verification and/or logging.
9858 */
9859    bool            fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9860 if ( fNewNoRem
9861 && ( 0
9862#if 0 /* auto enable on first paged protected mode interrupt */
9863 || ( pOrgCtx->eflags.Bits.u1IF
9864 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9865 && TRPMHasTrap(pVCpu)
9866 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9867#endif
9868#if 0
9869        || (   pOrgCtx->cs.Sel == 0x10
9870            && (   pOrgCtx->rip == 0x90119e3e
9871                || pOrgCtx->rip == 0x901d9810) )
9872#endif
9873#if 0 /* Auto enable DSL - FPU stuff. */
9874        || (   pOrgCtx->cs.Sel == 0x10
9875 && (// pOrgCtx->rip == 0xc02ec07f
9876 //|| pOrgCtx->rip == 0xc02ec082
9877 //|| pOrgCtx->rip == 0xc02ec0c9
9878 0
9879 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9880#endif
9881#if 0 /* Auto enable DSL - fstp st0 stuff. */
9882        || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9883#endif
9884#if 0
9885 || pOrgCtx->rip == 0x9022bb3a
9886#endif
9887#if 0
9888 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9889#endif
9890#if 0
9891 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9892 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9893#endif
9894#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9895 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9896 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9897 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9898#endif
9899#if 0 /* NT4SP1 - xadd early boot. */
9900 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9901#endif
9902#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9903 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9904#endif
9905#if 0 /* NT4SP1 - cmpxchg (AMD). */
9906 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9907#endif
9908#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9909 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9910#endif
9911#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9912 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9913
9914#endif
9915#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9916 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9917
9918#endif
9919#if 0 /* NT4SP1 - frstor [ecx] */
9920 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9921#endif
9922#if 0 /* xxxxxx - All long mode code. */
9923 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9924#endif
9925#if 0 /* rep movsq linux 3.7 64-bit boot. */
9926 || (pOrgCtx->rip == 0x0000000000100241)
9927#endif
9928#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9929 || (pOrgCtx->rip == 0x000000000215e240)
9930#endif
9931#if 0 /* DOS's size-overridden iret to v8086. */
9932 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9933#endif
9934 )
9935 )
9936 {
9937 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9938 RTLogFlags(NULL, "enabled");
9939 fNewNoRem = false;
9940 }
9941 if (fNewNoRem != pIemCpu->fNoRem)
9942 {
9943 pIemCpu->fNoRem = fNewNoRem;
9944 if (!fNewNoRem)
9945 {
9946 LogAlways(("Enabling verification mode!\n"));
9947 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9948 }
9949 else
9950 LogAlways(("Disabling verification mode!\n"));
9951 }
9952
9953 /*
9954 * Switch state.
9955 */
9956 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9957 {
9958 static CPUMCTX s_DebugCtx; /* Ugly! */
9959
9960 s_DebugCtx = *pOrgCtx;
9961 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9962 }
9963
9964 /*
9965 * See if there is an interrupt pending in TRPM and inject it if we can.
9966 */
9967 pIemCpu->uInjectCpl = UINT8_MAX;
9968 if ( pOrgCtx->eflags.Bits.u1IF
9969 && TRPMHasTrap(pVCpu)
9970 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9971 {
9972 uint8_t u8TrapNo;
9973 TRPMEVENT enmType;
9974 RTGCUINT uErrCode;
9975 RTGCPTR uCr2;
9976 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9977 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9978 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9979 TRPMResetTrap(pVCpu);
9980 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9981 }
9982
9983 /*
9984 * Reset the counters.
9985 */
9986 pIemCpu->cIOReads = 0;
9987 pIemCpu->cIOWrites = 0;
9988 pIemCpu->fIgnoreRaxRdx = false;
9989 pIemCpu->fOverlappingMovs = false;
9990 pIemCpu->fProblematicMemory = false;
9991 pIemCpu->fUndefinedEFlags = 0;
9992
9993 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9994 {
9995 /*
9996 * Free all verification records.
9997 */
9998 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9999 pIemCpu->pIemEvtRecHead = NULL;
10000 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
10001 do
10002 {
10003 while (pEvtRec)
10004 {
10005 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
10006 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
10007 pIemCpu->pFreeEvtRec = pEvtRec;
10008 pEvtRec = pNext;
10009 }
10010 pEvtRec = pIemCpu->pOtherEvtRecHead;
10011 pIemCpu->pOtherEvtRecHead = NULL;
10012 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
10013 } while (pEvtRec);
10014 }
10015}
10016
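/*
 * Rough shape of the verification round trip (sketch, not literal caller
 * code): the instruction execution path brackets each instruction with the
 * setup/check pair defined in this section, roughly:
 *
 *      iemExecVerificationModeSetup(pIemCpu);   // snapshot context, reset event records
 *      ... decode and execute one instruction via IEM ...
 *      iemExecVerificationModeCheck(pIemCpu);   // re-run in HM/REM and compare
 */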
10017
10018/**
10019 * Allocate an event record.
10020 * @returns Pointer to a record.
10021 */
10022IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
10023{
10024 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10025 return NULL;
10026
10027 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
10028 if (pEvtRec)
10029 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
10030 else
10031 {
10032 if (!pIemCpu->ppIemEvtRecNext)
10033 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
10034
10035 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
10036 if (!pEvtRec)
10037 return NULL;
10038 }
10039 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
10040 pEvtRec->pNext = NULL;
10041 return pEvtRec;
10042}
10043
10044
10045/**
10046 * IOMMMIORead notification.
10047 */
10048VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
10049{
10050 PVMCPU pVCpu = VMMGetCpu(pVM);
10051 if (!pVCpu)
10052 return;
10053 PIEMCPU pIemCpu = &pVCpu->iem.s;
10054 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10055 if (!pEvtRec)
10056 return;
10057 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
10058 pEvtRec->u.RamRead.GCPhys = GCPhys;
10059 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
10060 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10061 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10062}
10063
10064
10065/**
10066 * IOMMMIOWrite notification.
10067 */
10068VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
10069{
10070 PVMCPU pVCpu = VMMGetCpu(pVM);
10071 if (!pVCpu)
10072 return;
10073 PIEMCPU pIemCpu = &pVCpu->iem.s;
10074 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10075 if (!pEvtRec)
10076 return;
10077 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
10078 pEvtRec->u.RamWrite.GCPhys = GCPhys;
10079 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
10080 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
10081 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
10082 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
10083 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
10084 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10085 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10086}
10087
10088
10089/**
10090 * IOMIOPortRead notification.
10091 */
10092VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
10093{
10094 PVMCPU pVCpu = VMMGetCpu(pVM);
10095 if (!pVCpu)
10096 return;
10097 PIEMCPU pIemCpu = &pVCpu->iem.s;
10098 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10099 if (!pEvtRec)
10100 return;
10101 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10102 pEvtRec->u.IOPortRead.Port = Port;
10103 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10104 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10105 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10106}
10107
10108/**
10109 * IOMIOPortWrite notification.
10110 */
10111VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10112{
10113 PVMCPU pVCpu = VMMGetCpu(pVM);
10114 if (!pVCpu)
10115 return;
10116 PIEMCPU pIemCpu = &pVCpu->iem.s;
10117 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10118 if (!pEvtRec)
10119 return;
10120 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10121 pEvtRec->u.IOPortWrite.Port = Port;
10122 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10123 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10124 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10125 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10126}
10127
10128
10129VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10130{
10131 PVMCPU pVCpu = VMMGetCpu(pVM);
10132 if (!pVCpu)
10133 return;
10134 PIEMCPU pIemCpu = &pVCpu->iem.s;
10135 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10136 if (!pEvtRec)
10137 return;
10138 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
10139 pEvtRec->u.IOPortStrRead.Port = Port;
10140 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
10141 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
10142 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10143 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10144}
10145
10146
10147VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10148{
10149 PVMCPU pVCpu = VMMGetCpu(pVM);
10150 if (!pVCpu)
10151 return;
10152 PIEMCPU pIemCpu = &pVCpu->iem.s;
10153 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10154 if (!pEvtRec)
10155 return;
10156 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
10157 pEvtRec->u.IOPortStrWrite.Port = Port;
10158 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
10159 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
10160 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10161 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10162}
10163
10164
10165/**
10166 * Fakes and records an I/O port read.
10167 *
10168 * @returns VINF_SUCCESS.
10169 * @param pIemCpu The IEM per CPU data.
10170 * @param Port The I/O port.
10171 * @param pu32Value Where to store the fake value.
10172 * @param cbValue The size of the access.
10173 */
10174IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10175{
10176 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10177 if (pEvtRec)
10178 {
10179 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10180 pEvtRec->u.IOPortRead.Port = Port;
10181 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10182 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10183 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10184 }
10185 pIemCpu->cIOReads++;
10186 *pu32Value = 0xcccccccc;
10187 return VINF_SUCCESS;
10188}
10189
10190
10191/**
10192 * Fakes and records an I/O port write.
10193 *
10194 * @returns VINF_SUCCESS.
10195 * @param pIemCpu The IEM per CPU data.
10196 * @param Port The I/O port.
10197 * @param u32Value The value being written.
10198 * @param cbValue The size of the access.
10199 */
10200IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10201{
10202 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10203 if (pEvtRec)
10204 {
10205 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10206 pEvtRec->u.IOPortWrite.Port = Port;
10207 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10208 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10209 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10210 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10211 }
10212 pIemCpu->cIOWrites++;
10213 return VINF_SUCCESS;
10214}
10215
10216
10217/**
10218 * Used to add extra details about a stub case.
10219 * @param pIemCpu The IEM per CPU state.
10220 */
10221IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10222{
10223 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10224 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10225 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10226 char szRegs[4096];
10227 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10228 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10229 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10230 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10231 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10232 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10233 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10234 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10235 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10236 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10237 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10238 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10239 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10240 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10241 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10242 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10243 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10244 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10245 " efer=%016VR{efer}\n"
10246 " pat=%016VR{pat}\n"
10247 " sf_mask=%016VR{sf_mask}\n"
10248 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10249 " lstar=%016VR{lstar}\n"
10250 " star=%016VR{star} cstar=%016VR{cstar}\n"
10251 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10252 );
10253
10254 char szInstr1[256];
10255 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10256 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10257 szInstr1, sizeof(szInstr1), NULL);
10258 char szInstr2[256];
10259 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10260 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10261 szInstr2, sizeof(szInstr2), NULL);
10262
10263 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10264}
10265
10266
10267/**
10268 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10269 * dump to the assertion info.
10270 *
10271 * @param pEvtRec The record to dump.
10272 */
10273IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10274{
10275 switch (pEvtRec->enmEvent)
10276 {
10277 case IEMVERIFYEVENT_IOPORT_READ:
10278 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10279                            pEvtRec->u.IOPortRead.Port,
10280                            pEvtRec->u.IOPortRead.cbValue);
10281 break;
10282 case IEMVERIFYEVENT_IOPORT_WRITE:
10283 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10284 pEvtRec->u.IOPortWrite.Port,
10285 pEvtRec->u.IOPortWrite.cbValue,
10286 pEvtRec->u.IOPortWrite.u32Value);
10287 break;
10288 case IEMVERIFYEVENT_IOPORT_STR_READ:
10289 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
10290                            pEvtRec->u.IOPortStrRead.Port,
10291                            pEvtRec->u.IOPortStrRead.cbValue,
10292                            pEvtRec->u.IOPortStrRead.cTransfers);
10293 break;
10294 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10295 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
10296 pEvtRec->u.IOPortStrWrite.Port,
10297 pEvtRec->u.IOPortStrWrite.cbValue,
10298 pEvtRec->u.IOPortStrWrite.cTransfers);
10299 break;
10300 case IEMVERIFYEVENT_RAM_READ:
10301 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10302 pEvtRec->u.RamRead.GCPhys,
10303 pEvtRec->u.RamRead.cb);
10304 break;
10305 case IEMVERIFYEVENT_RAM_WRITE:
10306 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10307 pEvtRec->u.RamWrite.GCPhys,
10308 pEvtRec->u.RamWrite.cb,
10309 (int)pEvtRec->u.RamWrite.cb,
10310 pEvtRec->u.RamWrite.ab);
10311 break;
10312 default:
10313 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10314 break;
10315 }
10316}
10317
10318
10319/**
10320 * Raises an assertion on the specified records, showing the given message with
10321 * a record dump attached.
10322 *
10323 * @param pIemCpu The IEM per CPU data.
10324 * @param pEvtRec1 The first record.
10325 * @param pEvtRec2 The second record.
10326 * @param pszMsg The message explaining why we're asserting.
10327 */
10328IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10329{
10330 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10331 iemVerifyAssertAddRecordDump(pEvtRec1);
10332 iemVerifyAssertAddRecordDump(pEvtRec2);
10333 iemVerifyAssertMsg2(pIemCpu);
10334 RTAssertPanic();
10335}
10336
10337
10338/**
10339 * Raises an assertion on the specified record, showing the given message with
10340 * a record dump attached.
10341 *
10342 * @param pIemCpu The IEM per CPU data.
10343 * @param   pEvtRec     The record to dump.
10344 * @param pszMsg The message explaining why we're asserting.
10345 */
10346IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10347{
10348 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10349 iemVerifyAssertAddRecordDump(pEvtRec);
10350 iemVerifyAssertMsg2(pIemCpu);
10351 RTAssertPanic();
10352}
10353
10354
10355/**
10356 * Verifies a write record.
10357 *
10358 * @param pIemCpu The IEM per CPU data.
10359 * @param pEvtRec The write record.
10360 * @param   fRem    Set if REM was doing the other execution.  If clear
10361 * it was HM.
10362 */
10363IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10364{
10365 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10366 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10367 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10368 if ( RT_FAILURE(rc)
10369 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10370 {
10371 /* fend off ins */
10372 if ( !pIemCpu->cIOReads
10373 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10374 || ( pEvtRec->u.RamWrite.cb != 1
10375 && pEvtRec->u.RamWrite.cb != 2
10376 && pEvtRec->u.RamWrite.cb != 4) )
10377 {
10378 /* fend off ROMs and MMIO */
10379 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10380 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10381 {
10382 /* fend off fxsave */
10383 if (pEvtRec->u.RamWrite.cb != 512)
10384 {
10385 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10386 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10387 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10388 RTAssertMsg2Add("%s: %.*Rhxs\n"
10389 "iem: %.*Rhxs\n",
10390 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10391 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10392 iemVerifyAssertAddRecordDump(pEvtRec);
10393 iemVerifyAssertMsg2(pIemCpu);
10394 RTAssertPanic();
10395 }
10396 }
10397 }
10398 }
10399
10400}
10401
10402/**
10403 * Performs the post-execution verification checks.
10404 */
10405IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10406{
10407 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10408 return;
10409
10410 /*
10411 * Switch back the state.
10412 */
10413 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10414 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10415 Assert(pOrgCtx != pDebugCtx);
10416 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10417
10418 /*
10419 * Execute the instruction in REM.
10420 */
10421 bool fRem = false;
10422 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10423 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10424 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10425#ifdef IEM_VERIFICATION_MODE_FULL_HM
10426 if ( HMIsEnabled(pVM)
10427 && pIemCpu->cIOReads == 0
10428 && pIemCpu->cIOWrites == 0
10429 && !pIemCpu->fProblematicMemory)
10430 {
10431 uint64_t uStartRip = pOrgCtx->rip;
10432 unsigned iLoops = 0;
10433 do
10434 {
10435 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10436 iLoops++;
10437 } while ( rc == VINF_SUCCESS
10438 || ( rc == VINF_EM_DBG_STEPPED
10439 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10440 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10441 || ( pOrgCtx->rip != pDebugCtx->rip
10442 && pIemCpu->uInjectCpl != UINT8_MAX
10443 && iLoops < 8) );
10444 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10445 rc = VINF_SUCCESS;
10446 }
10447#endif
10448 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10449 || rc == VINF_IOM_R3_IOPORT_READ
10450 || rc == VINF_IOM_R3_IOPORT_WRITE
10451 || rc == VINF_IOM_R3_MMIO_READ
10452 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10453 || rc == VINF_IOM_R3_MMIO_WRITE
10454 || rc == VINF_CPUM_R3_MSR_READ
10455 || rc == VINF_CPUM_R3_MSR_WRITE
10456 || rc == VINF_EM_RESCHEDULE
10457 )
10458 {
10459 EMRemLock(pVM);
10460 rc = REMR3EmulateInstruction(pVM, pVCpu);
10461 AssertRC(rc);
10462 EMRemUnlock(pVM);
10463 fRem = true;
10464 }
10465
10466 /*
10467 * Compare the register states.
10468 */
10469 unsigned cDiffs = 0;
10470 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10471 {
10472 //Log(("REM and IEM ends up with different registers!\n"));
10473 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10474
10475# define CHECK_FIELD(a_Field) \
10476 do \
10477 { \
10478 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10479 { \
10480 switch (sizeof(pOrgCtx->a_Field)) \
10481 { \
10482 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10483 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10484 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10485 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10486 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10487 } \
10488 cDiffs++; \
10489 } \
10490 } while (0)
10491# define CHECK_XSTATE_FIELD(a_Field) \
10492 do \
10493 { \
10494 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10495 { \
10496 switch (sizeof(pOrgXState->a_Field)) \
10497 { \
10498 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10499 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10500 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10501 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10502 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10503 } \
10504 cDiffs++; \
10505 } \
10506 } while (0)
10507
10508# define CHECK_BIT_FIELD(a_Field) \
10509 do \
10510 { \
10511 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10512 { \
10513 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10514 cDiffs++; \
10515 } \
10516 } while (0)
10517
10518# define CHECK_SEL(a_Sel) \
10519 do \
10520 { \
10521 CHECK_FIELD(a_Sel.Sel); \
10522 CHECK_FIELD(a_Sel.Attr.u); \
10523 CHECK_FIELD(a_Sel.u64Base); \
10524 CHECK_FIELD(a_Sel.u32Limit); \
10525 CHECK_FIELD(a_Sel.fFlags); \
10526 } while (0)
10527
10528 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10529 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10530
10531#if 1 /* The recompiler doesn't update these the intel way. */
10532 if (fRem)
10533 {
10534 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10535 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10536 pOrgXState->x87.CS = pDebugXState->x87.CS;
10537 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10538 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10539 pOrgXState->x87.DS = pDebugXState->x87.DS;
10540 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10541 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10542 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10543 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10544 }
10545#endif
10546 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10547 {
10548 RTAssertMsg2Weak(" the FPU state differs\n");
10549 cDiffs++;
10550 CHECK_XSTATE_FIELD(x87.FCW);
10551 CHECK_XSTATE_FIELD(x87.FSW);
10552 CHECK_XSTATE_FIELD(x87.FTW);
10553 CHECK_XSTATE_FIELD(x87.FOP);
10554 CHECK_XSTATE_FIELD(x87.FPUIP);
10555 CHECK_XSTATE_FIELD(x87.CS);
10556 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10557 CHECK_XSTATE_FIELD(x87.FPUDP);
10558 CHECK_XSTATE_FIELD(x87.DS);
10559 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10560 CHECK_XSTATE_FIELD(x87.MXCSR);
10561 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10562 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10563 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10564 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10565 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10566 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10567 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10568 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10569 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10570 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10571 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10572 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10573 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10574 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10575 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10576 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10577 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10578 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10579 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10580 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10581 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10582 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10583 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10584 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10585 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10586 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10587 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10588 }
10589 CHECK_FIELD(rip);
10590 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10591 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10592 {
10593 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10594 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10595 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10596 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10597 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10598 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10599 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10600 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10601 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10602 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10603 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10604 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10605 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10606 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10607 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10608 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10609        if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
10610 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10611 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10612 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10613 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10614 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10615 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10616 }
10617
10618 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10619 CHECK_FIELD(rax);
10620 CHECK_FIELD(rcx);
10621 if (!pIemCpu->fIgnoreRaxRdx)
10622 CHECK_FIELD(rdx);
10623 CHECK_FIELD(rbx);
10624 CHECK_FIELD(rsp);
10625 CHECK_FIELD(rbp);
10626 CHECK_FIELD(rsi);
10627 CHECK_FIELD(rdi);
10628 CHECK_FIELD(r8);
10629 CHECK_FIELD(r9);
10630 CHECK_FIELD(r10);
10631 CHECK_FIELD(r11);
10632 CHECK_FIELD(r12);
10633 CHECK_FIELD(r13);
10634 CHECK_SEL(cs);
10635 CHECK_SEL(ss);
10636 CHECK_SEL(ds);
10637 CHECK_SEL(es);
10638 CHECK_SEL(fs);
10639 CHECK_SEL(gs);
10640 CHECK_FIELD(cr0);
10641
10642    /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10643 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10644    /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
10645 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10646 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10647 {
10648 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10649 { /* ignore */ }
10650 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10651 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10652 && fRem)
10653 { /* ignore */ }
10654 else
10655 CHECK_FIELD(cr2);
10656 }
10657 CHECK_FIELD(cr3);
10658 CHECK_FIELD(cr4);
10659 CHECK_FIELD(dr[0]);
10660 CHECK_FIELD(dr[1]);
10661 CHECK_FIELD(dr[2]);
10662 CHECK_FIELD(dr[3]);
10663 CHECK_FIELD(dr[6]);
10664 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10665 CHECK_FIELD(dr[7]);
10666 CHECK_FIELD(gdtr.cbGdt);
10667 CHECK_FIELD(gdtr.pGdt);
10668 CHECK_FIELD(idtr.cbIdt);
10669 CHECK_FIELD(idtr.pIdt);
10670 CHECK_SEL(ldtr);
10671 CHECK_SEL(tr);
10672 CHECK_FIELD(SysEnter.cs);
10673 CHECK_FIELD(SysEnter.eip);
10674 CHECK_FIELD(SysEnter.esp);
10675 CHECK_FIELD(msrEFER);
10676 CHECK_FIELD(msrSTAR);
10677 CHECK_FIELD(msrPAT);
10678 CHECK_FIELD(msrLSTAR);
10679 CHECK_FIELD(msrCSTAR);
10680 CHECK_FIELD(msrSFMASK);
10681 CHECK_FIELD(msrKERNELGSBASE);
10682
10683 if (cDiffs != 0)
10684 {
10685 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10686 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10687 iemVerifyAssertMsg2(pIemCpu);
10688 RTAssertPanic();
10689 }
10690# undef CHECK_FIELD
10691# undef CHECK_BIT_FIELD
10692 }
10693
10694 /*
10695 * If the register state compared fine, check the verification event
10696 * records.
10697 */
10698 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10699 {
10700 /*
10701         * Compare verification event records.
10702 * - I/O port accesses should be a 1:1 match.
10703 */
10704 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10705 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10706 while (pIemRec && pOtherRec)
10707 {
10708            /* Since we might miss RAM writes and reads, ignore reads and verify
10709               any extra IEM write records against the actual memory contents. */
10710 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10711 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10712 && pIemRec->pNext)
10713 {
10714 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10715 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10716 pIemRec = pIemRec->pNext;
10717 }
10718
10719 /* Do the compare. */
10720 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10721 {
10722 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10723 break;
10724 }
10725 bool fEquals;
10726 switch (pIemRec->enmEvent)
10727 {
10728 case IEMVERIFYEVENT_IOPORT_READ:
10729 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10730 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10731 break;
10732 case IEMVERIFYEVENT_IOPORT_WRITE:
10733 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10734 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10735 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10736 break;
10737 case IEMVERIFYEVENT_IOPORT_STR_READ:
10738 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
10739 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
10740 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
10741 break;
10742 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10743 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
10744 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
10745 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
10746 break;
10747 case IEMVERIFYEVENT_RAM_READ:
10748 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10749 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10750 break;
10751 case IEMVERIFYEVENT_RAM_WRITE:
10752 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10753 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10754 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10755 break;
10756 default:
10757 fEquals = false;
10758 break;
10759 }
10760 if (!fEquals)
10761 {
10762 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10763 break;
10764 }
10765
10766 /* advance */
10767 pIemRec = pIemRec->pNext;
10768 pOtherRec = pOtherRec->pNext;
10769 }
10770
10771 /* Ignore extra writes and reads. */
10772 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10773 {
10774 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10775 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10776 pIemRec = pIemRec->pNext;
10777 }
10778 if (pIemRec != NULL)
10779 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10780 else if (pOtherRec != NULL)
10781 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10782 }
10783 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10784}
10785
10786#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10787
10788/* stubs */
10789IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10790{
10791 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10792 return VERR_INTERNAL_ERROR;
10793}
10794
10795IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10796{
10797 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10798 return VERR_INTERNAL_ERROR;
10799}
10800
10801#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10802
10803
10804#ifdef LOG_ENABLED
10805/**
10806 * Logs the current instruction.
10807 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10808 * @param pCtx The current CPU context.
10809 * @param fSameCtx Set if we have the same context information as the VMM,
10810 * clear if we may have already executed an instruction in
10811 * our debug context. When clear, we assume IEMCPU holds
10812 * valid CPU mode info.
10813 */
10814IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10815{
10816# ifdef IN_RING3
10817 if (LogIs2Enabled())
10818 {
10819 char szInstr[256];
10820 uint32_t cbInstr = 0;
10821 if (fSameCtx)
10822 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10823 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10824 szInstr, sizeof(szInstr), &cbInstr);
10825 else
10826 {
10827 uint32_t fFlags = 0;
10828 switch (pVCpu->iem.s.enmCpuMode)
10829 {
10830 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10831 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10832 case IEMMODE_16BIT:
10833 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10834 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10835 else
10836 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10837 break;
10838 }
10839 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10840 szInstr, sizeof(szInstr), &cbInstr);
10841 }
10842
10843 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10844 Log2(("****\n"
10845 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10846 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10847 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10848 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10849 " %s\n"
10850 ,
10851 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10852 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10853 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10854 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10855 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10856 szInstr));
10857
10858 if (LogIs3Enabled())
10859 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10860 }
10861 else
10862# endif
10863 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10864 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10865}
10866#endif
10867
10868
10869/**
10870 * Makes status code adjustments (pass up from I/O and access handlers)
10871 * as well as maintaining statistics.
10872 *
10873 * @returns Strict VBox status code to pass up.
10874 * @param pIemCpu The IEM per CPU data.
10875 * @param rcStrict The status from executing an instruction.
10876 */
10877DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10878{
10879 if (rcStrict != VINF_SUCCESS)
10880 {
10881 if (RT_SUCCESS(rcStrict))
10882 {
10883 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10884 || rcStrict == VINF_IOM_R3_IOPORT_READ
10885 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10886 || rcStrict == VINF_IOM_R3_MMIO_READ
10887 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10888 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10889 || rcStrict == VINF_CPUM_R3_MSR_READ
10890 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10891 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10892 || rcStrict == VINF_EM_RAW_TO_R3
10893 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10894 /* raw-mode / virt handlers only: */
10895 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10896 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10897 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10898 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10899 || rcStrict == VINF_SELM_SYNC_GDT
10900 || rcStrict == VINF_CSAM_PENDING_ACTION
10901 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10902 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10903/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10904 int32_t const rcPassUp = pIemCpu->rcPassUp;
10905 if (rcPassUp == VINF_SUCCESS)
10906 pIemCpu->cRetInfStatuses++;
10907 else if ( rcPassUp < VINF_EM_FIRST
10908 || rcPassUp > VINF_EM_LAST
10909 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10910 {
10911 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10912 pIemCpu->cRetPassUpStatus++;
10913 rcStrict = rcPassUp;
10914 }
10915 else
10916 {
10917 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10918 pIemCpu->cRetInfStatuses++;
10919 }
10920 }
10921 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10922 pIemCpu->cRetAspectNotImplemented++;
10923 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10924 pIemCpu->cRetInstrNotImplemented++;
10925#ifdef IEM_VERIFICATION_MODE_FULL
10926 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10927 rcStrict = VINF_SUCCESS;
10928#endif
10929 else
10930 pIemCpu->cRetErrStatuses++;
10931 }
10932 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10933 {
10934 pIemCpu->cRetPassUpStatus++;
10935 rcStrict = pIemCpu->rcPassUp;
10936 }
10937
10938 return rcStrict;
10939}
10940
10941
10942/**
10943 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10944 * IEMExecOneWithPrefetchedByPC.
10945 *
10946 * @return Strict VBox status code.
10947 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10948 * @param pIemCpu The IEM per CPU data.
10949 * @param fExecuteInhibit If set, execute the instruction following CLI,
10950 * POP SS and MOV SS,GR.
10951 */
10952DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10953{
10954 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10955 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10956 if (rcStrict == VINF_SUCCESS)
10957 pIemCpu->cInstructions++;
10958 if (pIemCpu->cActiveMappings > 0)
10959 iemMemRollback(pIemCpu);
10960//#ifdef DEBUG
10961// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10962//#endif
10963
10964 /* Execute the next instruction as well if a cli, pop ss or
10965 mov ss, Gr has just completed successfully. */
10966 if ( fExecuteInhibit
10967 && rcStrict == VINF_SUCCESS
10968 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10969 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10970 {
10971 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10972 if (rcStrict == VINF_SUCCESS)
10973 {
10974# ifdef LOG_ENABLED
10975 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10976# endif
10977 IEM_OPCODE_GET_NEXT_U8(&b);
10978 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10979 if (rcStrict == VINF_SUCCESS)
10980 pIemCpu->cInstructions++;
10981 if (pIemCpu->cActiveMappings > 0)
10982 iemMemRollback(pIemCpu);
10983 }
10984 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10985 }
10986
10987 /*
10988 * Return value fiddling, statistics and sanity assertions.
10989 */
10990 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10991
10992 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10993 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10994#if defined(IEM_VERIFICATION_MODE_FULL)
10995 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10996 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10997 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10998 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10999#endif
11000 return rcStrict;
11001}
11002
11003
11004#ifdef IN_RC
11005/**
11006 * Re-enters raw-mode or ensures we return to ring-3.
11007 *
11008 * @returns rcStrict, maybe modified.
11009 * @param pIemCpu The IEM CPU structure.
11010 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11011 * @param pCtx The current CPU context.
11012 * @param rcStrict   The status code returned by the interpreter.
11013 */
11014DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
11015{
11016 if (!pIemCpu->fInPatchCode)
11017 CPUMRawEnter(pVCpu);
11018 return rcStrict;
11019}
11020#endif
11021
11022
11023/**
11024 * Execute one instruction.
11025 *
11026 * @return Strict VBox status code.
11027 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11028 */
11029VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
11030{
11031 PIEMCPU pIemCpu = &pVCpu->iem.s;
11032
11033#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11034 iemExecVerificationModeSetup(pIemCpu);
11035#endif
11036#ifdef LOG_ENABLED
11037 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11038 iemLogCurInstr(pVCpu, pCtx, true);
11039#endif
11040
11041 /*
11042 * Do the decoding and emulation.
11043 */
11044 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11045 if (rcStrict == VINF_SUCCESS)
11046 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11047
11048#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11049 /*
11050 * Assert some sanity.
11051 */
11052 iemExecVerificationModeCheck(pIemCpu);
11053#endif
11054#ifdef IN_RC
11055 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11056#endif
11057 if (rcStrict != VINF_SUCCESS)
11058 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11059 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11060 return rcStrict;
11061}
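
/*
 * Illustrative usage sketch (not taken from the VMM sources): assuming the
 * caller is on the EMT and holds a valid pVCpu, executing one guest
 * instruction and propagating anything that needs outside attention could
 * look like this:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;    // let EM/HM service I/O, MMIO or ring-3 work
 *     // otherwise the instruction retired and CPUMCTX holds the updated state
 */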
11062
11063
11064VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11065{
11066 PIEMCPU pIemCpu = &pVCpu->iem.s;
11067 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11068 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11069
11070 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11071 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11072 if (rcStrict == VINF_SUCCESS)
11073 {
11074 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11075 if (pcbWritten)
11076 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11077 }
11078
11079#ifdef IN_RC
11080 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11081#endif
11082 return rcStrict;
11083}
11084
11085
11086VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11087 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11088{
11089 PIEMCPU pIemCpu = &pVCpu->iem.s;
11090 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11091 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11092
11093 VBOXSTRICTRC rcStrict;
11094 if ( cbOpcodeBytes
11095 && pCtx->rip == OpcodeBytesPC)
11096 {
11097 iemInitDecoder(pIemCpu, false);
11098 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11099 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11100 rcStrict = VINF_SUCCESS;
11101 }
11102 else
11103 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11104 if (rcStrict == VINF_SUCCESS)
11105 {
11106 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11107 }
11108
11109#ifdef IN_RC
11110 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11111#endif
11112 return rcStrict;
11113}
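
/*
 * Illustrative sketch only (the opcode bytes and the pCtx name are made up):
 * a caller that has already fetched the instruction bytes at the current RIP
 * can hand them in and skip the opcode prefetch:
 *
 *     uint8_t const abOpcode[] = { 0x0f, 0xa2 };    // cpuid, for example
 *     VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
 *                                                          abOpcode, sizeof(abOpcode));
 */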
11114
11115
11116VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11117{
11118 PIEMCPU pIemCpu = &pVCpu->iem.s;
11119 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11120 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11121
11122 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11123 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11124 if (rcStrict == VINF_SUCCESS)
11125 {
11126 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11127 if (pcbWritten)
11128 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11129 }
11130
11131#ifdef IN_RC
11132 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11133#endif
11134 return rcStrict;
11135}
11136
11137
11138VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11139 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11140{
11141 PIEMCPU pIemCpu = &pVCpu->iem.s;
11142 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11143 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11144
11145 VBOXSTRICTRC rcStrict;
11146 if ( cbOpcodeBytes
11147 && pCtx->rip == OpcodeBytesPC)
11148 {
11149 iemInitDecoder(pIemCpu, true);
11150 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11151 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11152 rcStrict = VINF_SUCCESS;
11153 }
11154 else
11155 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11156 if (rcStrict == VINF_SUCCESS)
11157 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11158
11159#ifdef IN_RC
11160 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11161#endif
11162 return rcStrict;
11163}
11164
11165
11166VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11167{
11168 PIEMCPU pIemCpu = &pVCpu->iem.s;
11169
11170 /*
11171 * See if there is an interrupt pending in TRPM and inject it if we can.
11172 */
11173#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11174 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11175# ifdef IEM_VERIFICATION_MODE_FULL
11176 pIemCpu->uInjectCpl = UINT8_MAX;
11177# endif
11178 if ( pCtx->eflags.Bits.u1IF
11179 && TRPMHasTrap(pVCpu)
11180 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11181 {
11182 uint8_t u8TrapNo;
11183 TRPMEVENT enmType;
11184 RTGCUINT uErrCode;
11185 RTGCPTR uCr2;
11186 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11187 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11188 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11189 TRPMResetTrap(pVCpu);
11190 }
11191#else
11192 iemExecVerificationModeSetup(pIemCpu);
11193 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11194#endif
11195
11196 /*
11197 * Log the state.
11198 */
11199#ifdef LOG_ENABLED
11200 iemLogCurInstr(pVCpu, pCtx, true);
11201#endif
11202
11203 /*
11204 * Do the decoding and emulation.
11205 */
11206 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11207 if (rcStrict == VINF_SUCCESS)
11208 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11209
11210#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11211 /*
11212 * Assert some sanity.
11213 */
11214 iemExecVerificationModeCheck(pIemCpu);
11215#endif
11216
11217 /*
11218 * Maybe re-enter raw-mode and log.
11219 */
11220#ifdef IN_RC
11221 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11222#endif
11223 if (rcStrict != VINF_SUCCESS)
11224 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11225 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11226 return rcStrict;
11227}
11228
11229
11230
11231/**
11232 * Injects a trap, fault, abort, software interrupt or external interrupt.
11233 *
11234 * The parameter list matches TRPMQueryTrapAll pretty closely.
11235 *
11236 * @returns Strict VBox status code.
11237 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11238 * @param u8TrapNo The trap number.
11239 * @param enmType What type is it (trap/fault/abort), software
11240 * interrupt or hardware interrupt.
11241 * @param uErrCode The error code if applicable.
11242 * @param uCr2 The CR2 value if applicable.
11243 * @param cbInstr The instruction length (only relevant for
11244 * software interrupts).
11245 */
11246VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11247 uint8_t cbInstr)
11248{
11249 iemInitDecoder(&pVCpu->iem.s, false);
11250#ifdef DBGFTRACE_ENABLED
11251 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11252 u8TrapNo, enmType, uErrCode, uCr2);
11253#endif
11254
11255 uint32_t fFlags;
11256 switch (enmType)
11257 {
11258 case TRPM_HARDWARE_INT:
11259 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11260 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11261 uErrCode = uCr2 = 0;
11262 break;
11263
11264 case TRPM_SOFTWARE_INT:
11265 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11266 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11267 uErrCode = uCr2 = 0;
11268 break;
11269
11270 case TRPM_TRAP:
11271 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11272 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11273 if (u8TrapNo == X86_XCPT_PF)
11274 fFlags |= IEM_XCPT_FLAGS_CR2;
11275 switch (u8TrapNo)
11276 {
11277 case X86_XCPT_DF:
11278 case X86_XCPT_TS:
11279 case X86_XCPT_NP:
11280 case X86_XCPT_SS:
11281 case X86_XCPT_PF:
11282 case X86_XCPT_AC:
11283 fFlags |= IEM_XCPT_FLAGS_ERR;
11284 break;
11285
11286 case X86_XCPT_NMI:
11287 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11288 break;
11289 }
11290 break;
11291
11292 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11293 }
11294
11295 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11296}
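
/*
 * Illustrative sketch only (uErrCode and GCPtrFault are hypothetical locals):
 * a page fault would be injected as a TRPM_TRAP carrying an error code and
 * the faulting address for CR2; the instruction length is not needed here:
 *
 *     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                           uErrCode, GCPtrFault, 0);  // cbInstr unused for traps
 */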
11297
11298
11299/**
11300 * Injects the active TRPM event.
11301 *
11302 * @returns Strict VBox status code.
11303 * @param pVCpu The cross context virtual CPU structure.
11304 */
11305VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11306{
11307#ifndef IEM_IMPLEMENTS_TASKSWITCH
11308 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11309#else
11310 uint8_t u8TrapNo;
11311 TRPMEVENT enmType;
11312 RTGCUINT uErrCode;
11313 RTGCUINTPTR uCr2;
11314 uint8_t cbInstr;
11315 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11316 if (RT_FAILURE(rc))
11317 return rc;
11318
11319 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11320
11321 /** @todo Are there any other codes that imply the event was successfully
11322 * delivered to the guest? See @bugref{6607}. */
11323 if ( rcStrict == VINF_SUCCESS
11324 || rcStrict == VINF_IEM_RAISED_XCPT)
11325 {
11326 TRPMResetTrap(pVCpu);
11327 }
11328 return rcStrict;
11329#endif
11330}
11331
11332
11333VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11334{
11335 return VERR_NOT_IMPLEMENTED;
11336}
11337
11338
11339VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11340{
11341 return VERR_NOT_IMPLEMENTED;
11342}
11343
11344
11345#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11346/**
11347 * Executes a IRET instruction with default operand size.
11348 *
11349 * This is for PATM.
11350 *
11351 * @returns VBox status code.
11352 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11353 * @param pCtxCore The register frame.
11354 */
11355VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11356{
11357 PIEMCPU pIemCpu = &pVCpu->iem.s;
11358 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11359
11360 iemCtxCoreToCtx(pCtx, pCtxCore);
11361 iemInitDecoder(pIemCpu);
11362 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11363 if (rcStrict == VINF_SUCCESS)
11364 iemCtxToCtxCore(pCtxCore, pCtx);
11365 else
11366 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11367 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11368 return rcStrict;
11369}
11370#endif
11371
11372
11373/**
11374 * Macro used by the IEMExec* method to check the given instruction length.
11375 *
11376 * Will return on failure!
11377 *
11378 * @param a_cbInstr The given instruction length.
11379 * @param a_cbMin The minimum length.
11380 */
11381#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11382 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11383 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
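
/*
 * The single unsigned comparison above covers both bounds at once: subtracting
 * a_cbMin makes any a_cbInstr below the minimum wrap around to a huge value,
 * so only a_cbMin <= a_cbInstr <= 15 passes.  Worked through for a_cbMin = 2:
 *
 *     a_cbInstr = 1   ->  (unsigned)(1 - 2) = 0xffffffff  >  13  -> fail
 *     a_cbInstr = 2   ->   0                             <=  13  -> pass
 *     a_cbInstr = 15  ->  13                             <=  13  -> pass
 *     a_cbInstr = 16  ->  14                              >  13  -> fail
 */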
11384
11385
11386/**
11387 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11388 *
11389 * This API ASSUMES that the caller has already verified that the guest code is
11390 * allowed to access the I/O port. (The I/O port is in the DX register in the
11391 * guest state.)
11392 *
11393 * @returns Strict VBox status code.
11394 * @param pVCpu The cross context virtual CPU structure.
11395 * @param cbValue The size of the I/O port access (1, 2, or 4).
11396 * @param enmAddrMode The addressing mode.
11397 * @param fRepPrefix Indicates whether a repeat prefix is used
11398 * (doesn't matter which for this instruction).
11399 * @param cbInstr The instruction length in bytes.
11400 * @param iEffSeg       The effective segment register number.
11401 */
11402VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11403 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11404{
11405 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11406 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11407
11408 /*
11409 * State init.
11410 */
11411 PIEMCPU pIemCpu = &pVCpu->iem.s;
11412 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11413
11414 /*
11415 * Switch orgy for getting to the right handler.
11416 */
11417 VBOXSTRICTRC rcStrict;
11418 if (fRepPrefix)
11419 {
11420 switch (enmAddrMode)
11421 {
11422 case IEMMODE_16BIT:
11423 switch (cbValue)
11424 {
11425 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11426 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11427 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11428 default:
11429 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11430 }
11431 break;
11432
11433 case IEMMODE_32BIT:
11434 switch (cbValue)
11435 {
11436 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11437 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11438 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11439 default:
11440 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11441 }
11442 break;
11443
11444 case IEMMODE_64BIT:
11445 switch (cbValue)
11446 {
11447 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11448 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11449 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11450 default:
11451 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11452 }
11453 break;
11454
11455 default:
11456 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11457 }
11458 }
11459 else
11460 {
11461 switch (enmAddrMode)
11462 {
11463 case IEMMODE_16BIT:
11464 switch (cbValue)
11465 {
11466 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11467 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11468 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11469 default:
11470 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11471 }
11472 break;
11473
11474 case IEMMODE_32BIT:
11475 switch (cbValue)
11476 {
11477 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11478 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11479 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11480 default:
11481 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11482 }
11483 break;
11484
11485 case IEMMODE_64BIT:
11486 switch (cbValue)
11487 {
11488 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11489 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11490 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11491 default:
11492 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11493 }
11494 break;
11495
11496 default:
11497 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11498 }
11499 }
11500
11501 iemUninitExec(pIemCpu);
11502 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11503}
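
/*
 * Illustrative sketch only: a 'rep outsb' (opcode bytes f3 6e, two bytes) hit
 * in a 16-bit guest using the default DS segment might be forwarded roughly
 * like this:
 *
 *     // cbValue=1 (byte), 16-bit addressing, rep prefix, 2 byte instruction, DS segment
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_16BIT, true, 2, X86_SREG_DS);
 */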
11504
11505
11506/**
11507 * Interface for HM and EM for executing string I/O IN (read) instructions.
11508 *
11509 * This API ASSUMES that the caller has already verified that the guest code is
11510 * allowed to access the I/O port. (The I/O port is in the DX register in the
11511 * guest state.)
11512 *
11513 * @returns Strict VBox status code.
11514 * @param pVCpu The cross context virtual CPU structure.
11515 * @param cbValue The size of the I/O port access (1, 2, or 4).
11516 * @param enmAddrMode The addressing mode.
11517 * @param fRepPrefix Indicates whether a repeat prefix is used
11518 * (doesn't matter which for this instruction).
11519 * @param cbInstr The instruction length in bytes.
11520 */
11521VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11522 bool fRepPrefix, uint8_t cbInstr)
11523{
11524 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11525
11526 /*
11527 * State init.
11528 */
11529 PIEMCPU pIemCpu = &pVCpu->iem.s;
11530 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11531
11532 /*
11533 * Switch orgy for getting to the right handler.
11534 */
11535 VBOXSTRICTRC rcStrict;
11536 if (fRepPrefix)
11537 {
11538 switch (enmAddrMode)
11539 {
11540 case IEMMODE_16BIT:
11541 switch (cbValue)
11542 {
11543 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11544 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11545 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11546 default:
11547 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11548 }
11549 break;
11550
11551 case IEMMODE_32BIT:
11552 switch (cbValue)
11553 {
11554 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11555 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11556 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11557 default:
11558 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11559 }
11560 break;
11561
11562 case IEMMODE_64BIT:
11563 switch (cbValue)
11564 {
11565 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11566 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11567 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11568 default:
11569 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11570 }
11571 break;
11572
11573 default:
11574 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11575 }
11576 }
11577 else
11578 {
11579 switch (enmAddrMode)
11580 {
11581 case IEMMODE_16BIT:
11582 switch (cbValue)
11583 {
11584 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11585 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11586 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11587 default:
11588 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11589 }
11590 break;
11591
11592 case IEMMODE_32BIT:
11593 switch (cbValue)
11594 {
11595 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11596 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11597 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11598 default:
11599 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11600 }
11601 break;
11602
11603 case IEMMODE_64BIT:
11604 switch (cbValue)
11605 {
11606 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11607 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11608 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11609 default:
11610 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11611 }
11612 break;
11613
11614 default:
11615 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11616 }
11617 }
11618
11619 iemUninitExec(pIemCpu);
11620 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11621}
11622
11623
11624
11625/**
11626 * Interface for HM and EM to write to a CRx register.
11627 *
11628 * @returns Strict VBox status code.
11629 * @param pVCpu The cross context virtual CPU structure.
11630 * @param cbInstr The instruction length in bytes.
11631 * @param iCrReg The control register number (destination).
11632 * @param iGReg The general purpose register number (source).
11633 *
11634 * @remarks In ring-0 not all of the state needs to be synced in.
11635 */
11636VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11637{
11638 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11639 Assert(iCrReg < 16);
11640 Assert(iGReg < 16);
11641
11642 PIEMCPU pIemCpu = &pVCpu->iem.s;
11643 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11644 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11645 iemUninitExec(pIemCpu);
11646 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11647}
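
/*
 * Illustrative sketch only: a VM-exit on 'mov cr3, rax' (0f 22 d8, three
 * bytes) could be handed to the interpreter roughly as follows:
 *
 *     // cbInstr=3, iCrReg=3 (CR3), source register RAX
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3, 3, X86_GREG_xAX);
 */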
11648
11649
11650/**
11651 * Interface for HM and EM to read from a CRx register.
11652 *
11653 * @returns Strict VBox status code.
11654 * @param pVCpu The cross context virtual CPU structure.
11655 * @param cbInstr The instruction length in bytes.
11656 * @param iGReg The general purpose register number (destination).
11657 * @param iCrReg The control register number (source).
11658 *
11659 * @remarks In ring-0 not all of the state needs to be synced in.
11660 */
11661VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11662{
11663 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11664 Assert(iCrReg < 16);
11665 Assert(iGReg < 16);
11666
11667 PIEMCPU pIemCpu = &pVCpu->iem.s;
11668 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11669 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11670 iemUninitExec(pIemCpu);
11671 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11672}
11673
11674
11675/**
11676 * Interface for HM and EM to clear the CR0[TS] bit.
11677 *
11678 * @returns Strict VBox status code.
11679 * @param pVCpu The cross context virtual CPU structure.
11680 * @param cbInstr The instruction length in bytes.
11681 *
11682 * @remarks In ring-0 not all of the state needs to be synced in.
11683 */
11684VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11685{
11686 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11687
11688 PIEMCPU pIemCpu = &pVCpu->iem.s;
11689 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11690 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11691 iemUninitExec(pIemCpu);
11692 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11693}
11694
11695
11696/**
11697 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11698 *
11699 * @returns Strict VBox status code.
11700 * @param pVCpu The cross context virtual CPU structure.
11701 * @param cbInstr The instruction length in bytes.
11702 * @param uValue The value to load into CR0.
11703 *
11704 * @remarks In ring-0 not all of the state needs to be synced in.
11705 */
11706VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11707{
11708 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11709
11710 PIEMCPU pIemCpu = &pVCpu->iem.s;
11711 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11712 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11713 iemUninitExec(pIemCpu);
11714 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11715}
11716
11717
11718/**
11719 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11720 *
11721 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11722 *
11723 * @returns Strict VBox status code.
11724 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11725 * @param cbInstr The instruction length in bytes.
11726 * @remarks In ring-0 not all of the state needs to be synced in.
11727 * @thread EMT(pVCpu)
11728 */
11729VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11730{
11731 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11732
11733 PIEMCPU pIemCpu = &pVCpu->iem.s;
11734 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11735 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11736 iemUninitExec(pIemCpu);
11737 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11738}
11739
11740#ifdef IN_RING3
11741
11742/**
11743 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11744 *
11745 * @returns Merge between @a rcStrict and what the commit operation returned.
11746 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11747 * @param rcStrict The status code returned by ring-0 or raw-mode.
11748 */
11749VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11750{
11751 PIEMCPU pIemCpu = &pVCpu->iem.s;
11752
11753 /*
11754 * Retrieve and reset the pending commit.
11755 */
11756 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11757 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11758 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11759
11760 /*
11761 * Must reset pass-up status code.
11762 */
11763 pIemCpu->rcPassUp = VINF_SUCCESS;
11764
11765 /*
11766 * Call the function. Currently using switch here instead of function
11767 * pointer table as a switch won't get skewed.
11768 */
11769 VBOXSTRICTRC rcStrictCommit;
11770 switch (enmFn)
11771 {
11772 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11773 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11774 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11775 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11776 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11777 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11778 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11779 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11780 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11781 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11782 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11783 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11784 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11785 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11786 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11787 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11788 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11789 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11790 default:
11791 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11792 }
11793
11794 /*
11795     * Merge status code (if any) with the incoming one.
11796 */
11797 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11798 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11799 return rcStrict;
11800 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11801 return rcStrictCommit;
11802
11803 /* Complicated. */
11804 if (RT_FAILURE(rcStrict))
11805 return rcStrict;
11806 if (RT_FAILURE(rcStrictCommit))
11807 return rcStrictCommit;
11808 if ( rcStrict >= VINF_EM_FIRST
11809 && rcStrict <= VINF_EM_LAST)
11810 {
11811 if ( rcStrictCommit >= VINF_EM_FIRST
11812 && rcStrictCommit <= VINF_EM_LAST)
11813 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11814
11815 /* This really shouldn't happen. Check PGM + handler code! */
11816 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11817 }
11818 /* This shouldn't really happen either, see IOM_SUCCESS. */
11819 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11820}
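
/*
 * Illustrative sketch only: the ring-3 force-flag processing is expected to
 * pick the pending commit up along these lines once VMCPU_FF_IEM is seen:
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3DoPendingAction(pVCpu, rcStrict);
 */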
11821
11822#endif /* IN_RING3 */
11823