VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 59563

Last change on this file since 59563 was 59563, checked in by vboxsync, 9 years ago

IEM: Updated todo. Unwanted blank line.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 443.1 KB
1/* $Id: IEMAll.cpp 59563 2016-02-02 18:26:24Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 *
72 */
73
74/** @def IEM_VERIFICATION_MODE_MINIMAL
75 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
76 * context. */
77#if defined(DOXYGEN_RUNNING)
78# define IEM_VERIFICATION_MODE_MINIMAL
79#endif
80//#define IEM_LOG_MEMORY_WRITES
81#define IEM_IMPLEMENTS_TASKSWITCH
82
83
84/*********************************************************************************************************************************
85* Header Files *
86*********************************************************************************************************************************/
87#define LOG_GROUP LOG_GROUP_IEM
88#include <VBox/vmm/iem.h>
89#include <VBox/vmm/cpum.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <internal/pgm.h>
93#include <VBox/vmm/iom.h>
94#include <VBox/vmm/em.h>
95#include <VBox/vmm/hm.h>
96#include <VBox/vmm/tm.h>
97#include <VBox/vmm/dbgf.h>
98#include <VBox/vmm/dbgftrace.h>
99#ifdef VBOX_WITH_RAW_MODE_NOT_R0
100# include <VBox/vmm/patm.h>
101# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
102# include <VBox/vmm/csam.h>
103# endif
104#endif
105#include "IEMInternal.h"
106#ifdef IEM_VERIFICATION_MODE_FULL
107# include <VBox/vmm/rem.h>
108# include <VBox/vmm/mm.h>
109#endif
110#include <VBox/vmm/vm.h>
111#include <VBox/log.h>
112#include <VBox/err.h>
113#include <VBox/param.h>
114#include <VBox/dis.h>
115#include <VBox/disopcode.h>
116#include <iprt/assert.h>
117#include <iprt/string.h>
118#include <iprt/x86.h>
119
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/** @typedef PFNIEMOP
126 * Pointer to an opcode decoder function.
127 */
128
129/** @def FNIEMOP_DEF
130 * Define an opcode decoder function.
131 *
132 * We're using macros for this so that adding and removing parameters as well as
133 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL.
134 *
135 * @param a_Name The function name.
136 */
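/** @par Example (illustrative addition, not part of the original source)
 * A minimal sketch of how the FNIEMOP_DEF and FNIEMOP_DEF_1 shapes are meant
 * to be used; the names iemOp_ExampleNop and iemOp_ExampleByReg below are
 * hypothetical and only show the calling-convention wrapping.
 * @code
 * FNIEMOP_DEF(iemOp_ExampleNop)
 * {
 *     // Decode nothing, change nothing, just report success.
 *     return VINF_SUCCESS;
 * }
 *
 * FNIEMOP_DEF_1(iemOp_ExampleByReg, uint8_t, bRm)
 * {
 *     // A real decoder would dispatch on the ModR/M byte here.
 *     NOREF(bRm);
 *     return VINF_SUCCESS;
 * }
 * @endcode
 */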
137
138
139#if defined(__GNUC__) && defined(RT_ARCH_X86)
140typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
141# define FNIEMOP_DEF(a_Name) \
142 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
143# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
145# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
147
148#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
149typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
150# define FNIEMOP_DEF(a_Name) \
151 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
152# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
156
157#elif defined(__GNUC__)
158typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
159# define FNIEMOP_DEF(a_Name) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
161# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
163# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
165
166#else
167typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#endif
176
177
178/**
179 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
180 */
181typedef union IEMSELDESC
182{
183 /** The legacy view. */
184 X86DESC Legacy;
185 /** The long mode view. */
186 X86DESC64 Long;
187} IEMSELDESC;
188/** Pointer to a selector descriptor table entry. */
189typedef IEMSELDESC *PIEMSELDESC;
190
191
192/*********************************************************************************************************************************
193* Defined Constants And Macros *
194*********************************************************************************************************************************/
195/** Temporary hack to disable the double execution. Will be removed in favor
196 * of a dedicated execution mode in EM. */
197//#define IEM_VERIFICATION_MODE_NO_REM
198
199/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
200 * due to GCC lacking knowledge about the value range of a switch. */
201#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
202
203/**
204 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
205 * occasion.
206 */
207#ifdef LOG_ENABLED
208# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
209 do { \
210 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
212 } while (0)
213#else
214# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
215 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
216#endif
217
218/**
219 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
220 * occasion using the supplied logger statement.
221 *
222 * @param a_LoggerArgs What to log on failure.
223 */
224#ifdef LOG_ENABLED
225# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
226 do { \
227 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
228 /*LogFunc(a_LoggerArgs);*/ \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
230 } while (0)
231#else
232# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
233 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
234#endif
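/** @par Example (illustrative addition, not part of the original source)
 * A hedged sketch of typical usage; the condition and message are made up.
 * @code
 * if (fSomeUnsupportedCase) // hypothetical condition
 *     IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("unsupported case %#x\n", uValue));
 * @endcode
 */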
235
236/**
237 * Call an opcode decoder function.
238 *
239 * We're using macros for this so that adding and removing parameters can be
240 * done as we please. See FNIEMOP_DEF.
241 */
242#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
243
244/**
245 * Call a common opcode decoder function taking one extra argument.
246 *
247 * We're using macros for this so that adding and removing parameters can be
248 * done as we please. See FNIEMOP_DEF_1.
249 */
250#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
251
252/**
253 * Call a common opcode decoder function taking two extra arguments.
254 *
255 * We're using macros for this so that adding and removing parameters can be
256 * done as we please. See FNIEMOP_DEF_2.
257 */
258#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
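/** @par Example (illustrative addition, not part of the original source)
 * A minimal sketch of forwarding from one decoder function to another via
 * FNIEMOP_CALL_1; iemOp_ExampleWrapper and iemOp_ExampleByReg are hypothetical.
 * @code
 * FNIEMOP_DEF(iemOp_ExampleWrapper)
 * {
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_U8(&bRm);            // returns on fetch failure
 *     return FNIEMOP_CALL_1(iemOp_ExampleByReg, bRm);
 * }
 * @endcode
 */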
259
260/**
261 * Check if we're currently executing in real or virtual 8086 mode.
262 *
263 * @returns @c true if it is, @c false if not.
264 * @param a_pIemCpu The IEM state of the current CPU.
265 */
266#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
267
268/**
269 * Check if we're currently executing in virtual 8086 mode.
270 *
271 * @returns @c true if it is, @c false if not.
272 * @param a_pIemCpu The IEM state of the current CPU.
273 */
274#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
275
276/**
277 * Check if we're currently executing in long mode.
278 *
279 * @returns @c true if it is, @c false if not.
280 * @param a_pIemCpu The IEM state of the current CPU.
281 */
282#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
283
284/**
285 * Check if we're currently executing in real mode.
286 *
287 * @returns @c true if it is, @c false if not.
288 * @param a_pIemCpu The IEM state of the current CPU.
289 */
290#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
291
292/**
293 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
294 * @returns PCCPUMFEATURES
295 * @param a_pIemCpu The IEM state of the current CPU.
296 */
297#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
298
299/**
300 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
301 * @returns PCCPUMFEATURES
302 * @param a_pIemCpu The IEM state of the current CPU.
303 */
304#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
305
306/**
307 * Evaluates to true if we're presenting an Intel CPU to the guest.
308 */
309#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
310
311/**
312 * Evaluates to true if we're presenting an AMD CPU to the guest.
313 */
314#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
315
316/**
317 * Check if the address is canonical.
318 */
319#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
320
321
322/*********************************************************************************************************************************
323* Global Variables *
324*********************************************************************************************************************************/
325extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
326
327
328/** Function table for the ADD instruction. */
329IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
330{
331 iemAImpl_add_u8, iemAImpl_add_u8_locked,
332 iemAImpl_add_u16, iemAImpl_add_u16_locked,
333 iemAImpl_add_u32, iemAImpl_add_u32_locked,
334 iemAImpl_add_u64, iemAImpl_add_u64_locked
335};
336
337/** Function table for the ADC instruction. */
338IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
339{
340 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
341 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
342 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
343 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
344};
345
346/** Function table for the SUB instruction. */
347IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
348{
349 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
350 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
351 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
352 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
353};
354
355/** Function table for the SBB instruction. */
356IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
357{
358 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
359 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
360 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
361 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
362};
363
364/** Function table for the OR instruction. */
365IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
366{
367 iemAImpl_or_u8, iemAImpl_or_u8_locked,
368 iemAImpl_or_u16, iemAImpl_or_u16_locked,
369 iemAImpl_or_u32, iemAImpl_or_u32_locked,
370 iemAImpl_or_u64, iemAImpl_or_u64_locked
371};
372
373/** Function table for the XOR instruction. */
374IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
375{
376 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
377 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
378 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
379 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
380};
381
382/** Function table for the AND instruction. */
383IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
384{
385 iemAImpl_and_u8, iemAImpl_and_u8_locked,
386 iemAImpl_and_u16, iemAImpl_and_u16_locked,
387 iemAImpl_and_u32, iemAImpl_and_u32_locked,
388 iemAImpl_and_u64, iemAImpl_and_u64_locked
389};
390
391/** Function table for the CMP instruction.
392 * @remarks Making operand order ASSUMPTIONS.
393 */
394IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
395{
396 iemAImpl_cmp_u8, NULL,
397 iemAImpl_cmp_u16, NULL,
398 iemAImpl_cmp_u32, NULL,
399 iemAImpl_cmp_u64, NULL
400};
401
402/** Function table for the TEST instruction.
403 * @remarks Making operand order ASSUMPTIONS.
404 */
405IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
406{
407 iemAImpl_test_u8, NULL,
408 iemAImpl_test_u16, NULL,
409 iemAImpl_test_u32, NULL,
410 iemAImpl_test_u64, NULL
411};
412
413/** Function table for the BT instruction. */
414IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
415{
416 NULL, NULL,
417 iemAImpl_bt_u16, NULL,
418 iemAImpl_bt_u32, NULL,
419 iemAImpl_bt_u64, NULL
420};
421
422/** Function table for the BTC instruction. */
423IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
424{
425 NULL, NULL,
426 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
427 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
428 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
429};
430
431/** Function table for the BTR instruction. */
432IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
433{
434 NULL, NULL,
435 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
436 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
437 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
438};
439
440/** Function table for the BTS instruction. */
441IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
442{
443 NULL, NULL,
444 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
445 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
446 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
447};
448
449/** Function table for the BSF instruction. */
450IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
451{
452 NULL, NULL,
453 iemAImpl_bsf_u16, NULL,
454 iemAImpl_bsf_u32, NULL,
455 iemAImpl_bsf_u64, NULL
456};
457
458/** Function table for the BSR instruction. */
459IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
460{
461 NULL, NULL,
462 iemAImpl_bsr_u16, NULL,
463 iemAImpl_bsr_u32, NULL,
464 iemAImpl_bsr_u64, NULL
465};
466
467/** Function table for the IMUL instruction. */
468IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
469{
470 NULL, NULL,
471 iemAImpl_imul_two_u16, NULL,
472 iemAImpl_imul_two_u32, NULL,
473 iemAImpl_imul_two_u64, NULL
474};
475
476/** Group 1 /r lookup table. */
477IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
478{
479 &g_iemAImpl_add,
480 &g_iemAImpl_or,
481 &g_iemAImpl_adc,
482 &g_iemAImpl_sbb,
483 &g_iemAImpl_and,
484 &g_iemAImpl_sub,
485 &g_iemAImpl_xor,
486 &g_iemAImpl_cmp
487};
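/* Illustrative note (not part of the original source): the table above is
   indexed by the ModR/M reg field, so a Group 1 decoder would typically pick
   its worker along the lines of this hedged sketch (bRm being the ModR/M byte):
       PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
*/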
488
489/** Function table for the INC instruction. */
490IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
491{
492 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
493 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
494 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
495 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
496};
497
498/** Function table for the DEC instruction. */
499IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
500{
501 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
502 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
503 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
504 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
505};
506
507/** Function table for the NEG instruction. */
508IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
509{
510 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
511 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
512 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
513 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
514};
515
516/** Function table for the NOT instruction. */
517IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
518{
519 iemAImpl_not_u8, iemAImpl_not_u8_locked,
520 iemAImpl_not_u16, iemAImpl_not_u16_locked,
521 iemAImpl_not_u32, iemAImpl_not_u32_locked,
522 iemAImpl_not_u64, iemAImpl_not_u64_locked
523};
524
525
526/** Function table for the ROL instruction. */
527IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
528{
529 iemAImpl_rol_u8,
530 iemAImpl_rol_u16,
531 iemAImpl_rol_u32,
532 iemAImpl_rol_u64
533};
534
535/** Function table for the ROR instruction. */
536IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
537{
538 iemAImpl_ror_u8,
539 iemAImpl_ror_u16,
540 iemAImpl_ror_u32,
541 iemAImpl_ror_u64
542};
543
544/** Function table for the RCL instruction. */
545IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
546{
547 iemAImpl_rcl_u8,
548 iemAImpl_rcl_u16,
549 iemAImpl_rcl_u32,
550 iemAImpl_rcl_u64
551};
552
553/** Function table for the RCR instruction. */
554IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
555{
556 iemAImpl_rcr_u8,
557 iemAImpl_rcr_u16,
558 iemAImpl_rcr_u32,
559 iemAImpl_rcr_u64
560};
561
562/** Function table for the SHL instruction. */
563IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
564{
565 iemAImpl_shl_u8,
566 iemAImpl_shl_u16,
567 iemAImpl_shl_u32,
568 iemAImpl_shl_u64
569};
570
571/** Function table for the SHR instruction. */
572IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
573{
574 iemAImpl_shr_u8,
575 iemAImpl_shr_u16,
576 iemAImpl_shr_u32,
577 iemAImpl_shr_u64
578};
579
580/** Function table for the SAR instruction. */
581IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
582{
583 iemAImpl_sar_u8,
584 iemAImpl_sar_u16,
585 iemAImpl_sar_u32,
586 iemAImpl_sar_u64
587};
588
589
590/** Function table for the MUL instruction. */
591IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
592{
593 iemAImpl_mul_u8,
594 iemAImpl_mul_u16,
595 iemAImpl_mul_u32,
596 iemAImpl_mul_u64
597};
598
599/** Function table for the IMUL instruction working implicitly on rAX. */
600IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
601{
602 iemAImpl_imul_u8,
603 iemAImpl_imul_u16,
604 iemAImpl_imul_u32,
605 iemAImpl_imul_u64
606};
607
608/** Function table for the DIV instruction. */
609IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
610{
611 iemAImpl_div_u8,
612 iemAImpl_div_u16,
613 iemAImpl_div_u32,
614 iemAImpl_div_u64
615};
616
617/** Function table for the IDIV instruction. */
618IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
619{
620 iemAImpl_idiv_u8,
621 iemAImpl_idiv_u16,
622 iemAImpl_idiv_u32,
623 iemAImpl_idiv_u64
624};
625
626/** Function table for the SHLD instruction */
627IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
628{
629 iemAImpl_shld_u16,
630 iemAImpl_shld_u32,
631 iemAImpl_shld_u64,
632};
633
634/** Function table for the SHRD instruction */
635IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
636{
637 iemAImpl_shrd_u16,
638 iemAImpl_shrd_u32,
639 iemAImpl_shrd_u64,
640};
641
642
643/** Function table for the PUNPCKLBW instruction */
644IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
645/** Function table for the PUNPCKLWD instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
647/** Function table for the PUNPCKLDQ instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
649/** Function table for the PUNPCKLQDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
651
652/** Function table for the PUNPCKHBW instruction */
653IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
654/** Function table for the PUNPCKHWD instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
656/** Function table for the PUNPCKHDQ instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
658/** Function table for the PUNPCKHQDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
660
661/** Function table for the PXOR instruction */
662IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
663/** Function table for the PCMPEQB instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
665/** Function table for the PCMPEQW instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
667/** Function table for the PCMPEQD instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
669
670
671#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
672/** What IEM just wrote. */
673uint8_t g_abIemWrote[256];
674/** How much IEM just wrote. */
675size_t g_cbIemWrote;
676#endif
677
678
679/*********************************************************************************************************************************
680* Internal Functions *
681*********************************************************************************************************************************/
682IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
683IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
687IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
689IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
692IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
694IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
695IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
698IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
699IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
700IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
701IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
709IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
710IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
713IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
714IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
715
716#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
717IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
718#endif
719IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
720IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
721
722
723
724/**
725 * Sets the pass up status.
726 *
727 * @returns VINF_SUCCESS.
728 * @param pIemCpu The per CPU IEM state of the calling thread.
729 * @param rcPassUp The pass up status. Must be informational.
730 * VINF_SUCCESS is not allowed.
731 */
732IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
733{
734 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
735
736 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
737 if (rcOldPassUp == VINF_SUCCESS)
738 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
739 /* If both are EM scheduling codes, use EM priority rules. */
740 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
741 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
742 {
743 if (rcPassUp < rcOldPassUp)
744 {
745 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
747 }
748 else
749 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
750 }
751 /* Override EM scheduling with specific status code. */
752 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
753 {
754 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
755 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
756 }
757 /* Don't override specific status code, first come first served. */
758 else
759 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
760 return VINF_SUCCESS;
761}
762
763
764/**
765 * Initializes the execution state.
766 *
767 * @param pIemCpu The per CPU IEM state.
768 * @param fBypassHandlers Whether to bypass access handlers.
769 */
770DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
771{
772 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
773 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
774
775 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
776 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
777
778#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
780 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
781 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
782 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
783 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
784 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
785 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
786 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
787#endif
788
789#ifdef VBOX_WITH_RAW_MODE_NOT_R0
790 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
791#endif
792 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
793 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
794 ? IEMMODE_64BIT
795 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
796 ? IEMMODE_32BIT
797 : IEMMODE_16BIT;
798 pIemCpu->enmCpuMode = enmMode;
799#ifdef VBOX_STRICT
800 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
801 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
802 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
803 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
804 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
805 pIemCpu->uRexReg = 127;
806 pIemCpu->uRexB = 127;
807 pIemCpu->uRexIndex = 127;
808 pIemCpu->iEffSeg = 127;
809 pIemCpu->offOpcode = 127;
810 pIemCpu->cbOpcode = 127;
811#endif
812
813 pIemCpu->cActiveMappings = 0;
814 pIemCpu->iNextMapping = 0;
815 pIemCpu->rcPassUp = VINF_SUCCESS;
816 pIemCpu->fBypassHandlers = fBypassHandlers;
817#ifdef VBOX_WITH_RAW_MODE_NOT_R0
818 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
819 && pCtx->cs.u64Base == 0
820 && pCtx->cs.u32Limit == UINT32_MAX
821 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
822 if (!pIemCpu->fInPatchCode)
823 CPUMRawLeave(pVCpu, VINF_SUCCESS);
824#endif
825}
826
827
828/**
829 * Initializes the decoder state.
830 *
831 * @param pIemCpu The per CPU IEM state.
832 * @param fBypassHandlers Whether to bypass access handlers.
833 */
834DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
835{
836 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
837 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
838
839 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
840 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
841
842#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
843 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
851#endif
852
853#ifdef VBOX_WITH_RAW_MODE_NOT_R0
854 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
855#endif
856 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
857#ifdef IEM_VERIFICATION_MODE_FULL
858 if (pIemCpu->uInjectCpl != UINT8_MAX)
859 pIemCpu->uCpl = pIemCpu->uInjectCpl;
860#endif
861 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
862 ? IEMMODE_64BIT
863 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
864 ? IEMMODE_32BIT
865 : IEMMODE_16BIT;
866 pIemCpu->enmCpuMode = enmMode;
867 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
868 pIemCpu->enmEffAddrMode = enmMode;
869 if (enmMode != IEMMODE_64BIT)
870 {
871 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
872 pIemCpu->enmEffOpSize = enmMode;
873 }
874 else
875 {
876 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
877 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
878 }
879 pIemCpu->fPrefixes = 0;
880 pIemCpu->uRexReg = 0;
881 pIemCpu->uRexB = 0;
882 pIemCpu->uRexIndex = 0;
883 pIemCpu->iEffSeg = X86_SREG_DS;
884 pIemCpu->offOpcode = 0;
885 pIemCpu->cbOpcode = 0;
886 pIemCpu->cActiveMappings = 0;
887 pIemCpu->iNextMapping = 0;
888 pIemCpu->rcPassUp = VINF_SUCCESS;
889 pIemCpu->fBypassHandlers = fBypassHandlers;
890#ifdef VBOX_WITH_RAW_MODE_NOT_R0
891 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
892 && pCtx->cs.u64Base == 0
893 && pCtx->cs.u32Limit == UINT32_MAX
894 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
895 if (!pIemCpu->fInPatchCode)
896 CPUMRawLeave(pVCpu, VINF_SUCCESS);
897#endif
898
899#ifdef DBGFTRACE_ENABLED
900 switch (enmMode)
901 {
902 case IEMMODE_64BIT:
903 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
904 break;
905 case IEMMODE_32BIT:
906 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
907 break;
908 case IEMMODE_16BIT:
909 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
910 break;
911 }
912#endif
913}
914
915
916/**
917 * Prefetches opcodes the first time when starting execution.
918 *
919 * @returns Strict VBox status code.
920 * @param pIemCpu The IEM state.
921 * @param fBypassHandlers Whether to bypass access handlers.
922 */
923IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
924{
925#ifdef IEM_VERIFICATION_MODE_FULL
926 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
927#endif
928 iemInitDecoder(pIemCpu, fBypassHandlers);
929
930 /*
931 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
932 *
933 * First translate CS:rIP to a physical address.
934 */
935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
936 uint32_t cbToTryRead;
937 RTGCPTR GCPtrPC;
938 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
939 {
940 cbToTryRead = PAGE_SIZE;
941 GCPtrPC = pCtx->rip;
942 if (!IEM_IS_CANONICAL(GCPtrPC))
943 return iemRaiseGeneralProtectionFault0(pIemCpu);
944 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
945 }
946 else
947 {
948 uint32_t GCPtrPC32 = pCtx->eip;
949 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
950 if (GCPtrPC32 > pCtx->cs.u32Limit)
951 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
952 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
953 if (!cbToTryRead) /* overflowed */
954 {
955 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
956 cbToTryRead = UINT32_MAX;
957 }
958 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
959 Assert(GCPtrPC <= UINT32_MAX);
960 }
961
962#ifdef VBOX_WITH_RAW_MODE_NOT_R0
963 /* Allow interpretation of patch manager code blocks since they can for
964 instance throw #PFs for perfectly good reasons. */
965 if (pIemCpu->fInPatchCode)
966 {
967 size_t cbRead = 0;
968 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
969 AssertRCReturn(rc, rc);
970 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
971 return VINF_SUCCESS;
972 }
973#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
974
975 RTGCPHYS GCPhys;
976 uint64_t fFlags;
977 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
978 if (RT_FAILURE(rc))
979 {
980 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
981 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
982 }
983 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
984 {
985 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
986 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
987 }
988 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
989 {
990 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
991 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
992 }
993 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
994 /** @todo Check reserved bits and such stuff. PGM is better at doing
995 * that, so do it when implementing the guest virtual address
996 * TLB... */
997
998#ifdef IEM_VERIFICATION_MODE_FULL
999 /*
1000 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1001 * instruction.
1002 */
1003 /** @todo optimize this differently by not using PGMPhysRead. */
1004 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1005 pIemCpu->GCPhysOpcodes = GCPhys;
1006 if ( offPrevOpcodes < cbOldOpcodes
1007 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1008 {
1009 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1010 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1011 pIemCpu->cbOpcode = cbNew;
1012 return VINF_SUCCESS;
1013 }
1014#endif
1015
1016 /*
1017 * Read the bytes at this address.
1018 */
1019 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1020#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1021 size_t cbActual;
1022 if ( PATMIsEnabled(pVM)
1023 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1024 {
1025 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1026 Assert(cbActual > 0);
1027 pIemCpu->cbOpcode = (uint8_t)cbActual;
1028 }
1029 else
1030#endif
1031 {
1032 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1033 if (cbToTryRead > cbLeftOnPage)
1034 cbToTryRead = cbLeftOnPage;
1035 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1036 cbToTryRead = sizeof(pIemCpu->abOpcode);
1037
1038 if (!pIemCpu->fBypassHandlers)
1039 {
1040 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1041 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1042 { /* likely */ }
1043 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1044 {
1045 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1046 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1047 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1048 }
1049 else
1050 {
1051 Log((RT_SUCCESS(rcStrict)
1052 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1053 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1054 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1055 return rcStrict;
1056 }
1057 }
1058 else
1059 {
1060 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1061 if (RT_SUCCESS(rc))
1062 { /* likely */ }
1063 else
1064 {
1065 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1066 GCPtrPC, GCPhys, rc, cbToTryRead));
1067 return rc;
1068 }
1069 }
1070 pIemCpu->cbOpcode = cbToTryRead;
1071 }
1072
1073 return VINF_SUCCESS;
1074}
1075
1076
1077/**
1078 * Try to fetch at least @a cbMin more opcode bytes, raising the appropriate
1079 * exception if it fails.
1080 *
1081 * @returns Strict VBox status code.
1082 * @param pIemCpu The IEM state.
1083 * @param cbMin The minimum number of bytes relative to offOpcode
1084 * that must be read.
1085 */
1086IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1087{
1088 /*
1089 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1090 *
1091 * First translate CS:rIP to a physical address.
1092 */
1093 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1094 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1095 uint32_t cbToTryRead;
1096 RTGCPTR GCPtrNext;
1097 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1098 {
1099 cbToTryRead = PAGE_SIZE;
1100 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1101 if (!IEM_IS_CANONICAL(GCPtrNext))
1102 return iemRaiseGeneralProtectionFault0(pIemCpu);
1103 }
1104 else
1105 {
1106 uint32_t GCPtrNext32 = pCtx->eip;
1107 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1108 GCPtrNext32 += pIemCpu->cbOpcode;
1109 if (GCPtrNext32 > pCtx->cs.u32Limit)
1110 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1111 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1112 if (!cbToTryRead) /* overflowed */
1113 {
1114 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1115 cbToTryRead = UINT32_MAX;
1116 /** @todo check out wrapping around the code segment. */
1117 }
1118 if (cbToTryRead < cbMin - cbLeft)
1119 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1120 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1121 }
1122
1123 /* Only read up to the end of the page, and make sure we don't read more
1124 than the opcode buffer can hold. */
1125 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1126 if (cbToTryRead > cbLeftOnPage)
1127 cbToTryRead = cbLeftOnPage;
1128 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1129 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1130/** @todo r=bird: Convert assertion into undefined opcode exception? */
1131 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1132
1133#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1134 /* Allow interpretation of patch manager code blocks since they can for
1135 instance throw #PFs for perfectly good reasons. */
1136 if (pIemCpu->fInPatchCode)
1137 {
1138 size_t cbRead = 0;
1139 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1140 AssertRCReturn(rc, rc);
1141 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1142 return VINF_SUCCESS;
1143 }
1144#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1145
1146 RTGCPHYS GCPhys;
1147 uint64_t fFlags;
1148 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1149 if (RT_FAILURE(rc))
1150 {
1151 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1152 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1153 }
1154 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1155 {
1156 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1157 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1158 }
1159 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1160 {
1161 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1162 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1163 }
1164 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1165 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1166 /** @todo Check reserved bits and such stuff. PGM is better at doing
1167 * that, so do it when implementing the guest virtual address
1168 * TLB... */
1169
1170 /*
1171 * Read the bytes at this address.
1172 *
1173 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1174 * and since PATM should only patch the start of an instruction there
1175 * should be no need to check again here.
1176 */
1177 if (!pIemCpu->fBypassHandlers)
1178 {
1179 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1180 cbToTryRead, PGMACCESSORIGIN_IEM);
1181 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1182 { /* likely */ }
1183 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1184 {
1185 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1186 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1187 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1188 }
1189 else
1190 {
1191 Log((RT_SUCCESS(rcStrict)
1192 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1193 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1194 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1195 return rcStrict;
1196 }
1197 }
1198 else
1199 {
1200 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1201 if (RT_SUCCESS(rc))
1202 { /* likely */ }
1203 else
1204 {
1205 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1206 return rc;
1207 }
1208 }
1209 pIemCpu->cbOpcode += cbToTryRead;
1210 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1211
1212 return VINF_SUCCESS;
1213}
1214
1215
1216/**
1217 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1218 *
1219 * @returns Strict VBox status code.
1220 * @param pIemCpu The IEM state.
1221 * @param pb Where to return the opcode byte.
1222 */
1223DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1224{
1225 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1226 if (rcStrict == VINF_SUCCESS)
1227 {
1228 uint8_t offOpcode = pIemCpu->offOpcode;
1229 *pb = pIemCpu->abOpcode[offOpcode];
1230 pIemCpu->offOpcode = offOpcode + 1;
1231 }
1232 else
1233 *pb = 0;
1234 return rcStrict;
1235}
1236
1237
1238/**
1239 * Fetches the next opcode byte.
1240 *
1241 * @returns Strict VBox status code.
1242 * @param pIemCpu The IEM state.
1243 * @param pu8 Where to return the opcode byte.
1244 */
1245DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1246{
1247 uint8_t const offOpcode = pIemCpu->offOpcode;
1248 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1249 {
1250 *pu8 = pIemCpu->abOpcode[offOpcode];
1251 pIemCpu->offOpcode = offOpcode + 1;
1252 return VINF_SUCCESS;
1253 }
1254 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1255}
1256
1257
1258/**
1259 * Fetches the next opcode byte, returns automatically on failure.
1260 *
1261 * @param a_pu8 Where to return the opcode byte.
1262 * @remark Implicitly references pIemCpu.
1263 */
1264#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1265 do \
1266 { \
1267 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1268 if (rcStrict2 != VINF_SUCCESS) \
1269 return rcStrict2; \
1270 } while (0)
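/** @par Example (illustrative addition, not part of the original source)
 * A minimal sketch, assuming a caller with the FNIEMOP_DEF shape, of fetching
 * an immediate byte; the function name iemOp_ExampleImm8 is hypothetical.
 * @code
 * FNIEMOP_DEF(iemOp_ExampleImm8)
 * {
 *     uint8_t u8Imm;
 *     IEM_OPCODE_GET_NEXT_U8(&u8Imm); // returns the status code on failure
 *     // ... use u8Imm ...
 *     return VINF_SUCCESS;
 * }
 * @endcode
 */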
1271
1272
1273/**
1274 * Fetches the next signed byte from the opcode stream.
1275 *
1276 * @returns Strict VBox status code.
1277 * @param pIemCpu The IEM state.
1278 * @param pi8 Where to return the signed byte.
1279 */
1280DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1281{
1282 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1283}
1284
1285
1286/**
1287 * Fetches the next signed byte from the opcode stream, returning automatically
1288 * on failure.
1289 *
1290 * @param a_pi8 Where to return the signed byte.
1291 * @remark Implicitly references pIemCpu.
1292 */
1293#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1294 do \
1295 { \
1296 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1297 if (rcStrict2 != VINF_SUCCESS) \
1298 return rcStrict2; \
1299 } while (0)
1300
1301
1302/**
1303 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1304 *
1305 * @returns Strict VBox status code.
1306 * @param pIemCpu The IEM state.
1307 * @param pu16 Where to return the opcode word.
1308 */
1309DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1310{
1311 uint8_t u8;
1312 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1313 if (rcStrict == VINF_SUCCESS)
1314 *pu16 = (int8_t)u8;
1315 return rcStrict;
1316}
1317
1318
1319/**
1320 * Fetches the next signed byte from the opcode stream, sign-extending it to
1321 * an unsigned 16-bit value.
1322 *
1323 * @returns Strict VBox status code.
1324 * @param pIemCpu The IEM state.
1325 * @param pu16 Where to return the unsigned word.
1326 */
1327DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1328{
1329 uint8_t const offOpcode = pIemCpu->offOpcode;
1330 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1331 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1332
1333 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1334 pIemCpu->offOpcode = offOpcode + 1;
1335 return VINF_SUCCESS;
1336}
1337
1338
1339/**
1340 * Fetches the next signed byte from the opcode stream, sign-extending it to
1341 * a word, returning automatically on failure.
1342 *
1343 * @param a_pu16 Where to return the word.
1344 * @remark Implicitly references pIemCpu.
1345 */
1346#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1347 do \
1348 { \
1349 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1350 if (rcStrict2 != VINF_SUCCESS) \
1351 return rcStrict2; \
1352 } while (0)
1353
1354
1355/**
1356 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1357 *
1358 * @returns Strict VBox status code.
1359 * @param pIemCpu The IEM state.
1360 * @param pu32 Where to return the opcode dword.
1361 */
1362DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1363{
1364 uint8_t u8;
1365 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1366 if (rcStrict == VINF_SUCCESS)
1367 *pu32 = (int8_t)u8;
1368 return rcStrict;
1369}
1370
1371
1372/**
1373 * Fetches the next signed byte from the opcode stream, sign-extending it to
1374 * an unsigned 32-bit value.
1375 *
1376 * @returns Strict VBox status code.
1377 * @param pIemCpu The IEM state.
1378 * @param pu32 Where to return the unsigned dword.
1379 */
1380DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1381{
1382 uint8_t const offOpcode = pIemCpu->offOpcode;
1383 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1384 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1385
1386 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1387 pIemCpu->offOpcode = offOpcode + 1;
1388 return VINF_SUCCESS;
1389}
1390
1391
1392/**
1393 * Fetches the next signed byte from the opcode stream, sign-extending it to
1394 * a double word, returning automatically on failure.
1395 *
1396 * @param a_pu32 Where to return the double word.
1397 * @remark Implicitly references pIemCpu.
1398 */
1399#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1400 do \
1401 { \
1402 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1403 if (rcStrict2 != VINF_SUCCESS) \
1404 return rcStrict2; \
1405 } while (0)
1406
1407
1408/**
1409 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1410 *
1411 * @returns Strict VBox status code.
1412 * @param pIemCpu The IEM state.
1413 * @param pu64 Where to return the opcode qword.
1414 */
1415DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1416{
1417 uint8_t u8;
1418 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1419 if (rcStrict == VINF_SUCCESS)
1420 *pu64 = (int8_t)u8;
1421 return rcStrict;
1422}
1423
1424
1425/**
1426 * Fetches the next signed byte from the opcode stream, sign-extending it to
1427 * an unsigned 64-bit value.
1428 *
1429 * @returns Strict VBox status code.
1430 * @param pIemCpu The IEM state.
1431 * @param pu64 Where to return the unsigned qword.
1432 */
1433DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1434{
1435 uint8_t const offOpcode = pIemCpu->offOpcode;
1436 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1437 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1438
1439 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1440 pIemCpu->offOpcode = offOpcode + 1;
1441 return VINF_SUCCESS;
1442}
1443
1444
1445/**
1446 * Fetches the next signed byte from the opcode stream, sign-extending it to
1447 * a quad word, returning automatically on failure.
1448 *
1449 * @param a_pu64 Where to return the quad word.
1450 * @remark Implicitly references pIemCpu.
1451 */
1452#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1453 do \
1454 { \
1455 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1456 if (rcStrict2 != VINF_SUCCESS) \
1457 return rcStrict2; \
1458 } while (0)
1459
1460
1461/**
1462 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1463 *
1464 * @returns Strict VBox status code.
1465 * @param pIemCpu The IEM state.
1466 * @param pu16 Where to return the opcode word.
1467 */
1468DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1469{
1470 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1471 if (rcStrict == VINF_SUCCESS)
1472 {
1473 uint8_t offOpcode = pIemCpu->offOpcode;
1474 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1475 pIemCpu->offOpcode = offOpcode + 2;
1476 }
1477 else
1478 *pu16 = 0;
1479 return rcStrict;
1480}
1481
1482
1483/**
1484 * Fetches the next opcode word.
1485 *
1486 * @returns Strict VBox status code.
1487 * @param pIemCpu The IEM state.
1488 * @param pu16 Where to return the opcode word.
1489 */
1490DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1491{
1492 uint8_t const offOpcode = pIemCpu->offOpcode;
1493 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1494 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1495
1496 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1497 pIemCpu->offOpcode = offOpcode + 2;
1498 return VINF_SUCCESS;
1499}
1500
1501
1502/**
1503 * Fetches the next opcode word, returns automatically on failure.
1504 *
1505 * @param a_pu16 Where to return the opcode word.
1506 * @remark Implicitly references pIemCpu.
1507 */
1508#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1509 do \
1510 { \
1511 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1512 if (rcStrict2 != VINF_SUCCESS) \
1513 return rcStrict2; \
1514 } while (0)
1515
1516
1517/**
1518 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1519 *
1520 * @returns Strict VBox status code.
1521 * @param pIemCpu The IEM state.
1522 * @param pu32 Where to return the opcode double word.
1523 */
1524DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1525{
1526 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1527 if (rcStrict == VINF_SUCCESS)
1528 {
1529 uint8_t offOpcode = pIemCpu->offOpcode;
1530 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1531 pIemCpu->offOpcode = offOpcode + 2;
1532 }
1533 else
1534 *pu32 = 0;
1535 return rcStrict;
1536}
1537
1538
1539/**
1540 * Fetches the next opcode word, zero extending it to a double word.
1541 *
1542 * @returns Strict VBox status code.
1543 * @param pIemCpu The IEM state.
1544 * @param pu32 Where to return the opcode double word.
1545 */
1546DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1547{
1548 uint8_t const offOpcode = pIemCpu->offOpcode;
1549 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1550 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1551
1552 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1553 pIemCpu->offOpcode = offOpcode + 2;
1554 return VINF_SUCCESS;
1555}
1556
1557
1558/**
1559 * Fetches the next opcode word and zero extends it to a double word, returns
1560 * automatically on failure.
1561 *
1562 * @param a_pu32 Where to return the opcode double word.
1563 * @remark Implicitly references pIemCpu.
1564 */
1565#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1566 do \
1567 { \
1568 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1569 if (rcStrict2 != VINF_SUCCESS) \
1570 return rcStrict2; \
1571 } while (0)
1572
1573
1574/**
1575 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1576 *
1577 * @returns Strict VBox status code.
1578 * @param pIemCpu The IEM state.
1579 * @param pu64 Where to return the opcode quad word.
1580 */
1581DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1582{
1583 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1584 if (rcStrict == VINF_SUCCESS)
1585 {
1586 uint8_t offOpcode = pIemCpu->offOpcode;
1587 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1588 pIemCpu->offOpcode = offOpcode + 2;
1589 }
1590 else
1591 *pu64 = 0;
1592 return rcStrict;
1593}
1594
1595
1596/**
1597 * Fetches the next opcode word, zero extending it to a quad word.
1598 *
1599 * @returns Strict VBox status code.
1600 * @param pIemCpu The IEM state.
1601 * @param pu64 Where to return the opcode quad word.
1602 */
1603DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1604{
1605 uint8_t const offOpcode = pIemCpu->offOpcode;
1606 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1607 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1608
1609 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1610 pIemCpu->offOpcode = offOpcode + 2;
1611 return VINF_SUCCESS;
1612}
1613
1614
1615/**
1616 * Fetches the next opcode word and zero extends it to a quad word, returns
1617 * automatically on failure.
1618 *
1619 * @param a_pu64 Where to return the opcode quad word.
1620 * @remark Implicitly references pIemCpu.
1621 */
1622#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1623 do \
1624 { \
1625 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1626 if (rcStrict2 != VINF_SUCCESS) \
1627 return rcStrict2; \
1628 } while (0)
1629
1630
1631/**
1632 * Fetches the next signed word from the opcode stream.
1633 *
1634 * @returns Strict VBox status code.
1635 * @param pIemCpu The IEM state.
1636 * @param pi16 Where to return the signed word.
1637 */
1638DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1639{
1640 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1641}
1642
1643
1644/**
1645 * Fetches the next signed word from the opcode stream, returning automatically
1646 * on failure.
1647 *
1648 * @param a_pi16 Where to return the signed word.
1649 * @remark Implicitly references pIemCpu.
1650 */
1651#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1652 do \
1653 { \
1654 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1655 if (rcStrict2 != VINF_SUCCESS) \
1656 return rcStrict2; \
1657 } while (0)
1658
1659
1660/**
1661 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1662 *
1663 * @returns Strict VBox status code.
1664 * @param pIemCpu The IEM state.
1665 * @param pu32 Where to return the opcode dword.
1666 */
1667DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1668{
1669 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1670 if (rcStrict == VINF_SUCCESS)
1671 {
1672 uint8_t offOpcode = pIemCpu->offOpcode;
1673 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1674 pIemCpu->abOpcode[offOpcode + 1],
1675 pIemCpu->abOpcode[offOpcode + 2],
1676 pIemCpu->abOpcode[offOpcode + 3]);
1677 pIemCpu->offOpcode = offOpcode + 4;
1678 }
1679 else
1680 *pu32 = 0;
1681 return rcStrict;
1682}
1683
1684
1685/**
1686 * Fetches the next opcode dword.
1687 *
1688 * @returns Strict VBox status code.
1689 * @param pIemCpu The IEM state.
1690 * @param pu32 Where to return the opcode double word.
1691 */
1692DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1693{
1694 uint8_t const offOpcode = pIemCpu->offOpcode;
1695 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1696 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1697
1698 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1699 pIemCpu->abOpcode[offOpcode + 1],
1700 pIemCpu->abOpcode[offOpcode + 2],
1701 pIemCpu->abOpcode[offOpcode + 3]);
1702 pIemCpu->offOpcode = offOpcode + 4;
1703 return VINF_SUCCESS;
1704}
1705
1706
1707/**
1708 * Fetches the next opcode dword, returns automatically on failure.
1709 *
1710 * @param a_pu32 Where to return the opcode dword.
1711 * @remark Implicitly references pIemCpu.
1712 */
1713#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1714 do \
1715 { \
1716 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1717 if (rcStrict2 != VINF_SUCCESS) \
1718 return rcStrict2; \
1719 } while (0)
1720
1721
1722/**
1723 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1724 *
1725 * @returns Strict VBox status code.
1726 * @param pIemCpu The IEM state.
1727 * @param pu64 Where to return the opcode quad word.
1728 */
1729DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1730{
1731 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1732 if (rcStrict == VINF_SUCCESS)
1733 {
1734 uint8_t offOpcode = pIemCpu->offOpcode;
1735 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1736 pIemCpu->abOpcode[offOpcode + 1],
1737 pIemCpu->abOpcode[offOpcode + 2],
1738 pIemCpu->abOpcode[offOpcode + 3]);
1739 pIemCpu->offOpcode = offOpcode + 4;
1740 }
1741 else
1742 *pu64 = 0;
1743 return rcStrict;
1744}
1745
1746
1747/**
1748 * Fetches the next opcode dword, zero extending it to a quad word.
1749 *
1750 * @returns Strict VBox status code.
1751 * @param pIemCpu The IEM state.
1752 * @param pu64 Where to return the opcode quad word.
1753 */
1754DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1755{
1756 uint8_t const offOpcode = pIemCpu->offOpcode;
1757 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1758 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1759
1760 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1761 pIemCpu->abOpcode[offOpcode + 1],
1762 pIemCpu->abOpcode[offOpcode + 2],
1763 pIemCpu->abOpcode[offOpcode + 3]);
1764 pIemCpu->offOpcode = offOpcode + 4;
1765 return VINF_SUCCESS;
1766}
1767
1768
1769/**
1770 * Fetches the next opcode dword and zero extends it to a quad word, returns
1771 * automatically on failure.
1772 *
1773 * @param a_pu64 Where to return the opcode quad word.
1774 * @remark Implicitly references pIemCpu.
1775 */
1776#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1777 do \
1778 { \
1779 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1780 if (rcStrict2 != VINF_SUCCESS) \
1781 return rcStrict2; \
1782 } while (0)
1783
1784
1785/**
1786 * Fetches the next signed double word from the opcode stream.
1787 *
1788 * @returns Strict VBox status code.
1789 * @param pIemCpu The IEM state.
1790 * @param pi32 Where to return the signed double word.
1791 */
1792DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1793{
1794 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1795}
1796
1797/**
1798 * Fetches the next signed double word from the opcode stream, returning
1799 * automatically on failure.
1800 *
1801 * @param a_pi32 Where to return the signed double word.
1802 * @remark Implicitly references pIemCpu.
1803 */
1804#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1805 do \
1806 { \
1807 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1808 if (rcStrict2 != VINF_SUCCESS) \
1809 return rcStrict2; \
1810 } while (0)
1811
1812
1813/**
1814 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1815 *
1816 * @returns Strict VBox status code.
1817 * @param pIemCpu The IEM state.
1818 * @param pu64 Where to return the opcode qword.
1819 */
1820DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1821{
1822 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1823 if (rcStrict == VINF_SUCCESS)
1824 {
1825 uint8_t offOpcode = pIemCpu->offOpcode;
1826 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1827 pIemCpu->abOpcode[offOpcode + 1],
1828 pIemCpu->abOpcode[offOpcode + 2],
1829 pIemCpu->abOpcode[offOpcode + 3]);
1830 pIemCpu->offOpcode = offOpcode + 4;
1831 }
1832 else
1833 *pu64 = 0;
1834 return rcStrict;
1835}
1836
1837
1838/**
1839 * Fetches the next opcode dword, sign extending it into a quad word.
1840 *
1841 * @returns Strict VBox status code.
1842 * @param pIemCpu The IEM state.
1843 * @param pu64 Where to return the opcode quad word.
1844 */
1845DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1846{
1847 uint8_t const offOpcode = pIemCpu->offOpcode;
1848 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1849 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1850
1851 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1852 pIemCpu->abOpcode[offOpcode + 1],
1853 pIemCpu->abOpcode[offOpcode + 2],
1854 pIemCpu->abOpcode[offOpcode + 3]);
1855 *pu64 = i32;
1856 pIemCpu->offOpcode = offOpcode + 4;
1857 return VINF_SUCCESS;
1858}
1859
1860
1861/**
1862 * Fetches the next opcode double word and sign extends it to a quad word,
1863 * returns automatically on failure.
1864 *
1865 * @param a_pu64 Where to return the opcode quad word.
1866 * @remark Implicitly references pIemCpu.
1867 */
1868#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1869 do \
1870 { \
1871 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1872 if (rcStrict2 != VINF_SUCCESS) \
1873 return rcStrict2; \
1874 } while (0)
1875
1876
1877/**
1878 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1879 *
1880 * @returns Strict VBox status code.
1881 * @param pIemCpu The IEM state.
1882 * @param pu64 Where to return the opcode qword.
1883 */
1884DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1885{
1886 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1887 if (rcStrict == VINF_SUCCESS)
1888 {
1889 uint8_t offOpcode = pIemCpu->offOpcode;
1890 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1891 pIemCpu->abOpcode[offOpcode + 1],
1892 pIemCpu->abOpcode[offOpcode + 2],
1893 pIemCpu->abOpcode[offOpcode + 3],
1894 pIemCpu->abOpcode[offOpcode + 4],
1895 pIemCpu->abOpcode[offOpcode + 5],
1896 pIemCpu->abOpcode[offOpcode + 6],
1897 pIemCpu->abOpcode[offOpcode + 7]);
1898 pIemCpu->offOpcode = offOpcode + 8;
1899 }
1900 else
1901 *pu64 = 0;
1902 return rcStrict;
1903}
1904
1905
1906/**
1907 * Fetches the next opcode qword.
1908 *
1909 * @returns Strict VBox status code.
1910 * @param pIemCpu The IEM state.
1911 * @param pu64 Where to return the opcode qword.
1912 */
1913DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1914{
1915 uint8_t const offOpcode = pIemCpu->offOpcode;
1916 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1917 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1918
1919 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1920 pIemCpu->abOpcode[offOpcode + 1],
1921 pIemCpu->abOpcode[offOpcode + 2],
1922 pIemCpu->abOpcode[offOpcode + 3],
1923 pIemCpu->abOpcode[offOpcode + 4],
1924 pIemCpu->abOpcode[offOpcode + 5],
1925 pIemCpu->abOpcode[offOpcode + 6],
1926 pIemCpu->abOpcode[offOpcode + 7]);
1927 pIemCpu->offOpcode = offOpcode + 8;
1928 return VINF_SUCCESS;
1929}
1930
1931
1932/**
1933 * Fetches the next opcode quad word, returns automatically on failure.
1934 *
1935 * @param a_pu64 Where to return the opcode quad word.
1936 * @remark Implicitly references pIemCpu.
1937 */
1938#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1939 do \
1940 { \
1941 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1942 if (rcStrict2 != VINF_SUCCESS) \
1943 return rcStrict2; \
1944 } while (0)
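
/*
 * A minimal, self-contained sketch of the two-tier fetch pattern used by all the
 * opcode fetchers above: an inlined fast path that serves bytes straight from the
 * already-buffered opcode bytes, and an out-of-line slow path that tops the buffer
 * up first (cf. iemOpcodeFetchMoreBytes). The IEM_OPCODE_GET_NEXT_* macros simply
 * wrap these calls and return from the calling decoder function on failure, so
 * decoder code can read operands linearly. All names below (EXAMPLEDECODER,
 * exampleGetU16, ...) are made up for illustration and are not part of IEM.
 */
#if 0 /* illustrative sketch only, not built */
# include <stdint.h>

typedef struct EXAMPLEDECODER
{
    uint8_t abOpcode[16];   /* opcode bytes buffered so far */
    uint8_t cbOpcode;       /* number of valid bytes in abOpcode */
    uint8_t offOpcode;      /* current read position */
} EXAMPLEDECODER;

/* Out-of-line slow path: would fetch more guest bytes and retry; elided here. */
static int exampleGetU16Slow(EXAMPLEDECODER *pDecoder, uint16_t *pu16)
{
    (void)pDecoder;
    *pu16 = 0;
    return -1; /* failure status, cf. the VBOXSTRICTRC returns above */
}

/* Inlined fast path: bounds check, assemble little-endian, advance the cursor. */
static inline int exampleGetU16(EXAMPLEDECODER *pDecoder, uint16_t *pu16)
{
    uint8_t const offOpcode = pDecoder->offOpcode;
    if (offOpcode + 2 > pDecoder->cbOpcode)
        return exampleGetU16Slow(pDecoder, pu16);
    *pu16 = (uint16_t)(pDecoder->abOpcode[offOpcode] | (pDecoder->abOpcode[offOpcode + 1] << 8));
    pDecoder->offOpcode = (uint8_t)(offOpcode + 2);
    return 0; /* success */
}
#endif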
1945
1946
1947/** @name Misc Worker Functions.
1948 * @{
1949 */
1950
1951
1952/**
1953 * Validates a new SS segment.
1954 *
1955 * @returns VBox strict status code.
1956 * @param pIemCpu The IEM per CPU instance data.
1957 * @param pCtx The CPU context.
1958 * @param NewSS The new SS selector.
1959 * @param uCpl The CPL to load the stack for.
1960 * @param pDesc Where to return the descriptor.
1961 */
1962IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1963{
1964 NOREF(pCtx);
1965
1966 /* Null selectors are not allowed (we're not called for dispatching
1967 interrupts with SS=0 in long mode). */
1968 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1969 {
1970 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1971 return iemRaiseTaskSwitchFault0(pIemCpu);
1972 }
1973
1974 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1975 if ((NewSS & X86_SEL_RPL) != uCpl)
1976 {
1977 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1978 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1979 }
1980
1981 /*
1982 * Read the descriptor.
1983 */
1984 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1985 if (rcStrict != VINF_SUCCESS)
1986 return rcStrict;
1987
1988 /*
1989 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1990 */
1991 if (!pDesc->Legacy.Gen.u1DescType)
1992 {
1993 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1994 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1995 }
1996
1997 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1998 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1999 {
2000 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2001 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2002 }
2003 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2004 {
2005 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2006 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2007 }
2008
2009 /* Is it there? */
2010 /** @todo testcase: Is this checked before the canonical / limit check below? */
2011 if (!pDesc->Legacy.Gen.u1Present)
2012 {
2013 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2014 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2015 }
2016
2017 return VINF_SUCCESS;
2018}
2019
2020
2021/**
2022 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2023 * not.
2024 *
2025 * @param a_pIemCpu The IEM per CPU data.
2026 * @param a_pCtx The CPU context.
2027 */
2028#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2029# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2030 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2031 ? (a_pCtx)->eflags.u \
2032 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2033#else
2034# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2035 ( (a_pCtx)->eflags.u )
2036#endif
2037
2038/**
2039 * Updates the EFLAGS in the correct manner wrt. PATM.
2040 *
2041 * @param a_pIemCpu The IEM per CPU data.
2042 * @param a_pCtx The CPU context.
2043 * @param a_fEfl The new EFLAGS.
2044 */
2045#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2046# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2047 do { \
2048 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2049 (a_pCtx)->eflags.u = (a_fEfl); \
2050 else \
2051 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2052 } while (0)
2053#else
2054# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2055 do { \
2056 (a_pCtx)->eflags.u = (a_fEfl); \
2057 } while (0)
2058#endif
2059
2060
2061/** @} */
2062
2063/** @name Raising Exceptions.
2064 *
2065 * @{
2066 */
2067
2068/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2069 * @{ */
2070/** CPU exception. */
2071#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2072/** External interrupt (from PIC, APIC, whatever). */
2073#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2074/** Software interrupt (int or into, not bound).
2075 * Returns to the following instruction. */
2076#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2077/** Takes an error code. */
2078#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2079/** Takes a CR2. */
2080#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2081/** Generated by the breakpoint instruction. */
2082#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2083/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2084#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2085/** @} */
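
/*
 * A small, hedged illustration of how these flags are meant to be combined when
 * raising an event: a CPU exception that carries an error code and a CR2 value
 * (page-fault style) sets three of the flags, while an external interrupt carries
 * neither. The variable names below are made up for the example.
 */
#if 0 /* illustrative sketch only, not built */
    /* Page-fault style: CPU exception with an error code and a CR2 value. */
    uint32_t const fFlagsPfStyle = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2;
    /* External (PIC/APIC) interrupt: no error code, no CR2. */
    uint32_t const fFlagsExtInt  = IEM_XCPT_FLAGS_T_EXT_INT;
#endif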
2086
2087
2088/**
2089 * Loads the specified stack far pointer from the TSS.
2090 *
2091 * @returns VBox strict status code.
2092 * @param pIemCpu The IEM per CPU instance data.
2093 * @param pCtx The CPU context.
2094 * @param uCpl The CPL to load the stack for.
2095 * @param pSelSS Where to return the new stack segment.
2096 * @param puEsp Where to return the new stack pointer.
2097 */
2098IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2099 PRTSEL pSelSS, uint32_t *puEsp)
2100{
2101 VBOXSTRICTRC rcStrict;
2102 Assert(uCpl < 4);
2103 *puEsp = 0; /* make gcc happy */
2104 *pSelSS = 0; /* make gcc happy */
2105
2106 switch (pCtx->tr.Attr.n.u4Type)
2107 {
2108 /*
2109 * 16-bit TSS (X86TSS16).
2110 */
2111 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2112 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2113 {
2114 uint32_t off = uCpl * 4 + 2;
2115 if (off + 4 > pCtx->tr.u32Limit)
2116 {
2117 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2118 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2119 }
2120
2121 uint32_t u32Tmp = 0; /* gcc maybe... */
2122 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2123 if (rcStrict == VINF_SUCCESS)
2124 {
2125 *puEsp = RT_LOWORD(u32Tmp);
2126 *pSelSS = RT_HIWORD(u32Tmp);
2127 return VINF_SUCCESS;
2128 }
2129 break;
2130 }
2131
2132 /*
2133 * 32-bit TSS (X86TSS32).
2134 */
2135 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2136 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2137 {
2138 uint32_t off = uCpl * 8 + 4;
2139 if (off + 7 > pCtx->tr.u32Limit)
2140 {
2141 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2142 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2143 }
2144
2145 uint64_t u64Tmp;
2146 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2147 if (rcStrict == VINF_SUCCESS)
2148 {
2149 *puEsp = u64Tmp & UINT32_MAX;
2150 *pSelSS = (RTSEL)(u64Tmp >> 32);
2151 return VINF_SUCCESS;
2152 }
2153 break;
2154 }
2155
2156 default:
2157 AssertFailedReturn(VERR_IEM_IPE_4);
2158 }
2159 return rcStrict;
2160}
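
/*
 * The offsets used above follow directly from the TSS layouts: in a 16-bit TSS the
 * ring-N SP:SS pair is 2+2 bytes starting at offset 2, and in a 32-bit TSS the
 * ring-N ESP:SS pair is 4+4 bytes starting at offset 4, which is also why a single
 * U32 / U64 system read yields both the stack pointer (low part) and the selector
 * (high part). A tiny sketch of that arithmetic, with made-up names:
 */
#if 0 /* illustrative sketch only, not built */
# include <stdint.h>

/* Byte offset of the ring-uCpl stack fields in a 16-bit TSS (sp0 at 2, ss0 at 4, ...). */
static inline uint32_t exampleTss16StackSlotOffset(uint8_t uCpl)
{
    return (uint32_t)uCpl * 4 + 2;      /* SP (2 bytes) followed by SS (2 bytes) per ring */
}

/* Byte offset of the ring-uCpl stack fields in a 32-bit TSS (esp0 at 4, ss0 at 8, ...). */
static inline uint32_t exampleTss32StackSlotOffset(uint8_t uCpl)
{
    return (uint32_t)uCpl * 8 + 4;      /* ESP (4 bytes) followed by SS (4 bytes) per ring */
}
#endif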
2161
2162
2163/**
2164 * Loads the specified stack pointer from the 64-bit TSS.
2165 *
2166 * @returns VBox strict status code.
2167 * @param pIemCpu The IEM per CPU instance data.
2168 * @param pCtx The CPU context.
2169 * @param uCpl The CPL to load the stack for.
2170 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2171 * @param puRsp Where to return the new stack pointer.
2172 */
2173IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2174{
2175 Assert(uCpl < 4);
2176 Assert(uIst < 8);
2177 *puRsp = 0; /* make gcc happy */
2178
2179 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2180
2181 uint32_t off;
2182 if (uIst)
2183 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2184 else
2185 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2186 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2187 {
2188 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2189 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2190 }
2191
2192 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2193}
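
/*
 * For the 64-bit TSS the arithmetic above resolves to the standard AMD64 layout:
 * rsp0..rsp2 are 8-byte fields starting at byte offset 0x04 and ist1..ist7 are
 * 8-byte fields starting at 0x24, with IST indices being 1-based in the IDT gate.
 * A hard-coded sketch of the same computation (the real code uses RT_OFFSETOF on
 * X86TSS64 instead); the function name is made up:
 */
#if 0 /* illustrative sketch only, not built */
# include <stdint.h>

static inline uint32_t exampleTss64StackSlotOffset(uint8_t uCpl, uint8_t uIst)
{
    if (uIst)
        return (uint32_t)(uIst - 1) * 8 + 0x24;     /* ist1..ist7 */
    return (uint32_t)uCpl * 8 + 0x04;               /* rsp0..rsp2 */
}
#endif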
2194
2195
2196/**
2197 * Adjusts the CPU state according to the exception being raised.
2198 *
2199 * @param pCtx The CPU context.
2200 * @param u8Vector The exception that has been raised.
2201 */
2202DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2203{
2204 switch (u8Vector)
2205 {
2206 case X86_XCPT_DB:
2207 pCtx->dr[7] &= ~X86_DR7_GD;
2208 break;
2209 /** @todo Read the AMD and Intel exception reference... */
2210 }
2211}
2212
2213
2214/**
2215 * Implements exceptions and interrupts for real mode.
2216 *
2217 * @returns VBox strict status code.
2218 * @param pIemCpu The IEM per CPU instance data.
2219 * @param pCtx The CPU context.
2220 * @param cbInstr The number of bytes to offset rIP by in the return
2221 * address.
2222 * @param u8Vector The interrupt / exception vector number.
2223 * @param fFlags The flags.
2224 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2225 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2226 */
2227IEM_STATIC VBOXSTRICTRC
2228iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2229 PCPUMCTX pCtx,
2230 uint8_t cbInstr,
2231 uint8_t u8Vector,
2232 uint32_t fFlags,
2233 uint16_t uErr,
2234 uint64_t uCr2)
2235{
2236 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2237 NOREF(uErr); NOREF(uCr2);
2238
2239 /*
2240 * Read the IDT entry.
2241 */
2242 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2243 {
2244 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2245 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2246 }
2247 RTFAR16 Idte;
2248 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2249 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2250 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2251 return rcStrict;
2252
2253 /*
2254 * Push the stack frame.
2255 */
2256 uint16_t *pu16Frame;
2257 uint64_t uNewRsp;
2258 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2259 if (rcStrict != VINF_SUCCESS)
2260 return rcStrict;
2261
2262 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2263 pu16Frame[2] = (uint16_t)fEfl;
2264 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2265 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2266 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2267 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2268 return rcStrict;
2269
2270 /*
2271 * Load the vector address into cs:ip and make exception specific state
2272 * adjustments.
2273 */
2274 pCtx->cs.Sel = Idte.sel;
2275 pCtx->cs.ValidSel = Idte.sel;
2276 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2277 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2278 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2279 pCtx->rip = Idte.off;
2280 fEfl &= ~X86_EFL_IF;
2281 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2282
2283 /** @todo do we actually do this in real mode? */
2284 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2285 iemRaiseXcptAdjustState(pCtx, u8Vector);
2286
2287 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2288}
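
/*
 * The real-mode dispatch above boils down to: read the 4-byte far pointer
 * (offset:segment) at IVT slot vector * 4, push FLAGS, CS and the return IP
 * (IP + cbInstr for software interrupts) as three words, clear IF, and load
 * CS:IP from the vector. A compact sketch of the IVT lookup and the resulting
 * stack frame layout, using made-up names:
 */
#if 0 /* illustrative sketch only, not built */
# include <stdint.h>

/* Linear address of the real-mode IVT entry for a vector (4 bytes per entry). */
static inline uint32_t exampleRealModeIdteAddr(uint32_t uIdtBase, uint8_t u8Vector)
{
    return uIdtBase + (uint32_t)u8Vector * 4;
}

/* The three-word frame pushed above, lowest address first. */
typedef struct EXAMPLEREALMODEFRAME
{
    uint16_t uReturnIp;     /* pu16Frame[0]: IP, or IP + cbInstr for INT/INTO */
    uint16_t uReturnCs;     /* pu16Frame[1]: CS */
    uint16_t fFlags;        /* pu16Frame[2]: FLAGS as sampled before IF is cleared */
} EXAMPLEREALMODEFRAME;
#endif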
2289
2290
2291/**
2292 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2293 *
2294 * @param pIemCpu The IEM per CPU instance data.
2295 * @param pSReg Pointer to the segment register.
2296 */
2297IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2298{
2299 pSReg->Sel = 0;
2300 pSReg->ValidSel = 0;
2301 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2302 {
2303 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2304 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2305 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2306 }
2307 else
2308 {
2309 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2310 /** @todo check this on AMD-V */
2311 pSReg->u64Base = 0;
2312 pSReg->u32Limit = 0;
2313 }
2314}
2315
2316
2317/**
2318 * Loads a segment selector during a task switch in V8086 mode.
2319 *
2320 * @param pIemCpu The IEM per CPU instance data.
2321 * @param pSReg Pointer to the segment register.
2322 * @param uSel The selector value to load.
2323 */
2324IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2325{
2326 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2327 pSReg->Sel = uSel;
2328 pSReg->ValidSel = uSel;
2329 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2330 pSReg->u64Base = uSel << 4;
2331 pSReg->u32Limit = 0xffff;
2332 pSReg->Attr.u = 0xf3;
2333}
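
/*
 * In virtual-8086 mode a selector is simply a paragraph number, which is why the
 * loader above sets base = selector * 16, limit = 0xffff and the fixed attribute
 * byte 0xf3 (present, DPL 3, accessed read/write data). A one-line sketch of the
 * address translation, with a made-up name:
 */
#if 0 /* illustrative sketch only, not built */
# include <stdint.h>

static inline uint32_t exampleV86LinearAddr(uint16_t uSel, uint16_t off)
{
    return ((uint32_t)uSel << 4) + off;
}
#endif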
2334
2335
2336/**
2337 * Loads a NULL data selector into a selector register, both the hidden and
2338 * visible parts, in protected mode.
2339 *
2340 * @param pIemCpu The IEM state of the calling EMT.
2341 * @param pSReg Pointer to the segment register.
2342 * @param uRpl The RPL.
2343 */
2344IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2345{
2346 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2347 * data selector in protected mode. */
2348 pSReg->Sel = uRpl;
2349 pSReg->ValidSel = uRpl;
2350 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2351 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2352 {
2353 /* VT-x (Intel 3960x) observed doing something like this. */
2354 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2355 pSReg->u32Limit = UINT32_MAX;
2356 pSReg->u64Base = 0;
2357 }
2358 else
2359 {
2360 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2361 pSReg->u32Limit = 0;
2362 pSReg->u64Base = 0;
2363 }
2364}
2365
2366
2367/**
2368 * Loads a segment selector during a task switch in protected mode.
2369 *
2370 * In this task switch scenario, we would throw \#TS exceptions rather than
2371 * \#GPs.
2372 *
2373 * @returns VBox strict status code.
2374 * @param pIemCpu The IEM per CPU instance data.
2375 * @param pSReg Pointer to the segment register.
2376 * @param uSel The new selector value.
2377 *
2378 * @remarks This does _not_ handle CS or SS.
2379 * @remarks This expects pIemCpu->uCpl to be up to date.
2380 */
2381IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2382{
2383 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2384
2385 /* Null data selector. */
2386 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2387 {
2388 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2390 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2391 return VINF_SUCCESS;
2392 }
2393
2394 /* Fetch the descriptor. */
2395 IEMSELDESC Desc;
2396 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2397 if (rcStrict != VINF_SUCCESS)
2398 {
2399 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2400 VBOXSTRICTRC_VAL(rcStrict)));
2401 return rcStrict;
2402 }
2403
2404 /* Must be a data segment or readable code segment. */
2405 if ( !Desc.Legacy.Gen.u1DescType
2406 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2407 {
2408 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2409 Desc.Legacy.Gen.u4Type));
2410 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2411 }
2412
2413 /* Check privileges for data segments and non-conforming code segments. */
2414 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2415 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2416 {
2417 /* The RPL and the new CPL must be less than or equal to the DPL. */
2418 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2419 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2420 {
2421 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2422 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2423 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2424 }
2425 }
2426
2427 /* Is it there? */
2428 if (!Desc.Legacy.Gen.u1Present)
2429 {
2430 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2431 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2432 }
2433
2434 /* The base and limit. */
2435 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2436 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2437
2438 /*
2439 * Ok, everything checked out fine. Now set the accessed bit before
2440 * committing the result into the registers.
2441 */
2442 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2443 {
2444 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2445 if (rcStrict != VINF_SUCCESS)
2446 return rcStrict;
2447 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2448 }
2449
2450 /* Commit */
2451 pSReg->Sel = uSel;
2452 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2453 pSReg->u32Limit = cbLimit;
2454 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2455 pSReg->ValidSel = uSel;
2456 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2457 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2458 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2459
2460 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2461 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2462 return VINF_SUCCESS;
2463}
2464
2465
2466/**
2467 * Performs a task switch.
2468 *
2469 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2470 * caller is responsible for performing the necessary checks (like DPL, TSS
2471 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2472 * reference for JMP, CALL, IRET.
2473 *
2474 * If the task switch is due to a software interrupt or hardware exception,
2475 * the caller is responsible for validating the TSS selector and descriptor. See
2476 * Intel Instruction reference for INT n.
2477 *
2478 * @returns VBox strict status code.
2479 * @param pIemCpu The IEM per CPU instance data.
2480 * @param pCtx The CPU context.
2481 * @param enmTaskSwitch What caused this task switch.
2482 * @param uNextEip The EIP effective after the task switch.
2483 * @param fFlags The flags.
2484 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2485 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2486 * @param SelTSS The TSS selector of the new task.
2487 * @param pNewDescTSS Pointer to the new TSS descriptor.
2488 */
2489IEM_STATIC VBOXSTRICTRC
2490iemTaskSwitch(PIEMCPU pIemCpu,
2491 PCPUMCTX pCtx,
2492 IEMTASKSWITCH enmTaskSwitch,
2493 uint32_t uNextEip,
2494 uint32_t fFlags,
2495 uint16_t uErr,
2496 uint64_t uCr2,
2497 RTSEL SelTSS,
2498 PIEMSELDESC pNewDescTSS)
2499{
2500 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2501 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2502
2503 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2504 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2505 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2506 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2507 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2508
2509 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2510 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2511
2512 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2513 fIsNewTSS386, pCtx->eip, uNextEip));
2514
2515 /* Update CR2 in case it's a page-fault. */
2516 /** @todo This should probably be done much earlier in IEM/PGM. See
2517 * @bugref{5653#c49}. */
2518 if (fFlags & IEM_XCPT_FLAGS_CR2)
2519 pCtx->cr2 = uCr2;
2520
2521 /*
2522 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2523 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2524 */
2525 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2526 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2527 if (uNewTSSLimit < uNewTSSLimitMin)
2528 {
2529 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2530 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2531 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2532 }
2533
2534 /*
2535 * Check the current TSS limit. The last written byte to the current TSS during the
2536 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2537 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2538 *
2539 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2540 * end up with smaller than "legal" TSS limits.
2541 */
2542 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2543 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2544 if (uCurTSSLimit < uCurTSSLimitMin)
2545 {
2546 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2547 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2548 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2549 }
2550
2551 /*
2552 * Verify that the new TSS can be accessed and map it. Map only the required contents
2553 * and not the entire TSS.
2554 */
2555 void *pvNewTSS;
2556 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2557 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2558 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2559 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2560 * not perform correct translation if this happens. See Intel spec. 7.2.1
2561 * "Task-State Segment" */
2562 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2563 if (rcStrict != VINF_SUCCESS)
2564 {
2565 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2566 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2567 return rcStrict;
2568 }
2569
2570 /*
2571 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2572 */
2573 uint32_t u32EFlags = pCtx->eflags.u32;
2574 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2575 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2576 {
2577 PX86DESC pDescCurTSS;
2578 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2579 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2580 if (rcStrict != VINF_SUCCESS)
2581 {
2582 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2583 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2584 return rcStrict;
2585 }
2586
2587 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2588 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2589 if (rcStrict != VINF_SUCCESS)
2590 {
2591 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2592 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2593 return rcStrict;
2594 }
2595
2596 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2597 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2598 {
2599 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2600 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2601 u32EFlags &= ~X86_EFL_NT;
2602 }
2603 }
2604
2605 /*
2606 * Save the CPU state into the current TSS.
2607 */
2608 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2609 if (GCPtrNewTSS == GCPtrCurTSS)
2610 {
2611 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2612 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2613 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2614 }
2615 if (fIsNewTSS386)
2616 {
2617 /*
2618 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2619 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2620 */
2621 void *pvCurTSS32;
2622 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2623 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2624 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2625 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2626 if (rcStrict != VINF_SUCCESS)
2627 {
2628 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2629 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2630 return rcStrict;
2631 }
2632
2633 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2634 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2635 pCurTSS32->eip = uNextEip;
2636 pCurTSS32->eflags = u32EFlags;
2637 pCurTSS32->eax = pCtx->eax;
2638 pCurTSS32->ecx = pCtx->ecx;
2639 pCurTSS32->edx = pCtx->edx;
2640 pCurTSS32->ebx = pCtx->ebx;
2641 pCurTSS32->esp = pCtx->esp;
2642 pCurTSS32->ebp = pCtx->ebp;
2643 pCurTSS32->esi = pCtx->esi;
2644 pCurTSS32->edi = pCtx->edi;
2645 pCurTSS32->es = pCtx->es.Sel;
2646 pCurTSS32->cs = pCtx->cs.Sel;
2647 pCurTSS32->ss = pCtx->ss.Sel;
2648 pCurTSS32->ds = pCtx->ds.Sel;
2649 pCurTSS32->fs = pCtx->fs.Sel;
2650 pCurTSS32->gs = pCtx->gs.Sel;
2651
2652 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2653 if (rcStrict != VINF_SUCCESS)
2654 {
2655 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2656 VBOXSTRICTRC_VAL(rcStrict)));
2657 return rcStrict;
2658 }
2659 }
2660 else
2661 {
2662 /*
2663 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2664 */
2665 void *pvCurTSS16;
2666 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2667 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2668 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2669 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2670 if (rcStrict != VINF_SUCCESS)
2671 {
2672 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2673 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2674 return rcStrict;
2675 }
2676
2677 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2678 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2679 pCurTSS16->ip = uNextEip;
2680 pCurTSS16->flags = u32EFlags;
2681 pCurTSS16->ax = pCtx->ax;
2682 pCurTSS16->cx = pCtx->cx;
2683 pCurTSS16->dx = pCtx->dx;
2684 pCurTSS16->bx = pCtx->bx;
2685 pCurTSS16->sp = pCtx->sp;
2686 pCurTSS16->bp = pCtx->bp;
2687 pCurTSS16->si = pCtx->si;
2688 pCurTSS16->di = pCtx->di;
2689 pCurTSS16->es = pCtx->es.Sel;
2690 pCurTSS16->cs = pCtx->cs.Sel;
2691 pCurTSS16->ss = pCtx->ss.Sel;
2692 pCurTSS16->ds = pCtx->ds.Sel;
2693
2694 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2695 if (rcStrict != VINF_SUCCESS)
2696 {
2697 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2698 VBOXSTRICTRC_VAL(rcStrict)));
2699 return rcStrict;
2700 }
2701 }
2702
2703 /*
2704 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2705 */
2706 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2707 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2708 {
2709 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2710 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2711 pNewTSS->selPrev = pCtx->tr.Sel;
2712 }
2713
2714 /*
2715 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2716 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2717 */
2718 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2719 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2720 bool fNewDebugTrap;
2721 if (fIsNewTSS386)
2722 {
2723 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2724 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2725 uNewEip = pNewTSS32->eip;
2726 uNewEflags = pNewTSS32->eflags;
2727 uNewEax = pNewTSS32->eax;
2728 uNewEcx = pNewTSS32->ecx;
2729 uNewEdx = pNewTSS32->edx;
2730 uNewEbx = pNewTSS32->ebx;
2731 uNewEsp = pNewTSS32->esp;
2732 uNewEbp = pNewTSS32->ebp;
2733 uNewEsi = pNewTSS32->esi;
2734 uNewEdi = pNewTSS32->edi;
2735 uNewES = pNewTSS32->es;
2736 uNewCS = pNewTSS32->cs;
2737 uNewSS = pNewTSS32->ss;
2738 uNewDS = pNewTSS32->ds;
2739 uNewFS = pNewTSS32->fs;
2740 uNewGS = pNewTSS32->gs;
2741 uNewLdt = pNewTSS32->selLdt;
2742 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2743 }
2744 else
2745 {
2746 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2747 uNewCr3 = 0;
2748 uNewEip = pNewTSS16->ip;
2749 uNewEflags = pNewTSS16->flags;
2750 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2751 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2752 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2753 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2754 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2755 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2756 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2757 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2758 uNewES = pNewTSS16->es;
2759 uNewCS = pNewTSS16->cs;
2760 uNewSS = pNewTSS16->ss;
2761 uNewDS = pNewTSS16->ds;
2762 uNewFS = 0;
2763 uNewGS = 0;
2764 uNewLdt = pNewTSS16->selLdt;
2765 fNewDebugTrap = false;
2766 }
2767
2768 if (GCPtrNewTSS == GCPtrCurTSS)
2769 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2770 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2771
2772 /*
2773 * We're done accessing the new TSS.
2774 */
2775 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2776 if (rcStrict != VINF_SUCCESS)
2777 {
2778 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2779 return rcStrict;
2780 }
2781
2782 /*
2783 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2784 */
2785 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2786 {
2787 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2788 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2789 if (rcStrict != VINF_SUCCESS)
2790 {
2791 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2792 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2793 return rcStrict;
2794 }
2795
2796 /* Check that the descriptor indicates the new TSS is available (not busy). */
2797 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2798 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2799 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2800
2801 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2802 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2803 if (rcStrict != VINF_SUCCESS)
2804 {
2805 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2806 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2807 return rcStrict;
2808 }
2809 }
2810
2811 /*
2812 * From this point on, we're technically in the new task. Exceptions raised during the rest of
2813 * the task switch are deferred until it completes, but are delivered before any instruction in the new task executes.
2814 */
2815 pCtx->tr.Sel = SelTSS;
2816 pCtx->tr.ValidSel = SelTSS;
2817 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2818 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2819 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2820 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2821 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2822
2823 /* Set the busy bit in TR. */
2824 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2825 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2826 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2827 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2828 {
2829 uNewEflags |= X86_EFL_NT;
2830 }
2831
2832 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2833 pCtx->cr0 |= X86_CR0_TS;
2834 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2835
2836 pCtx->eip = uNewEip;
2837 pCtx->eax = uNewEax;
2838 pCtx->ecx = uNewEcx;
2839 pCtx->edx = uNewEdx;
2840 pCtx->ebx = uNewEbx;
2841 pCtx->esp = uNewEsp;
2842 pCtx->ebp = uNewEbp;
2843 pCtx->esi = uNewEsi;
2844 pCtx->edi = uNewEdi;
2845
2846 uNewEflags &= X86_EFL_LIVE_MASK;
2847 uNewEflags |= X86_EFL_RA1_MASK;
2848 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2849
2850 /*
2851 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2852 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2853 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2854 */
2855 pCtx->es.Sel = uNewES;
2856 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2857 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2858
2859 pCtx->cs.Sel = uNewCS;
2860 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2861 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2862
2863 pCtx->ss.Sel = uNewSS;
2864 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2865 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2866
2867 pCtx->ds.Sel = uNewDS;
2868 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2869 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2870
2871 pCtx->fs.Sel = uNewFS;
2872 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2873 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2874
2875 pCtx->gs.Sel = uNewGS;
2876 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2877 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2878 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2879
2880 pCtx->ldtr.Sel = uNewLdt;
2881 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2882 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2883 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2884
2885 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2886 {
2887 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2888 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2889 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2890 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2891 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2892 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2893 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2894 }
2895
2896 /*
2897 * Switch CR3 for the new task.
2898 */
2899 if ( fIsNewTSS386
2900 && (pCtx->cr0 & X86_CR0_PG))
2901 {
2902 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2903 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2904 {
2905 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2906 AssertRCSuccessReturn(rc, rc);
2907 }
2908 else
2909 pCtx->cr3 = uNewCr3;
2910
2911 /* Inform PGM. */
2912 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2913 {
2914 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2915 AssertRCReturn(rc, rc);
2916 /* ignore informational status codes */
2917 }
2918 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2919 }
2920
2921 /*
2922 * Switch LDTR for the new task.
2923 */
2924 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2925 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2926 else
2927 {
2928 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2929
2930 IEMSELDESC DescNewLdt;
2931 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2932 if (rcStrict != VINF_SUCCESS)
2933 {
2934 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2935 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2936 return rcStrict;
2937 }
2938 if ( !DescNewLdt.Legacy.Gen.u1Present
2939 || DescNewLdt.Legacy.Gen.u1DescType
2940 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2941 {
2942 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2943 uNewLdt, DescNewLdt.Legacy.u));
2944 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2945 }
2946
2947 pCtx->ldtr.ValidSel = uNewLdt;
2948 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2949 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2950 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2951 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2952 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2953 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2954 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2955 }
2956
2957 IEMSELDESC DescSS;
2958 if (IEM_IS_V86_MODE(pIemCpu))
2959 {
2960 pIemCpu->uCpl = 3;
2961 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2962 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2963 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2964 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2965 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2966 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2967 }
2968 else
2969 {
2970 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2971
2972 /*
2973 * Load the stack segment for the new task.
2974 */
2975 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2976 {
2977 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2978 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2979 }
2980
2981 /* Fetch the descriptor. */
2982 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2983 if (rcStrict != VINF_SUCCESS)
2984 {
2985 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2986 VBOXSTRICTRC_VAL(rcStrict)));
2987 return rcStrict;
2988 }
2989
2990 /* SS must be a data segment and writable. */
2991 if ( !DescSS.Legacy.Gen.u1DescType
2992 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2993 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2994 {
2995 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2996 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2997 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2998 }
2999
3000 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3001 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3002 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3003 {
3004 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3005 uNewCpl));
3006 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3007 }
3008
3009 /* Is it there? */
3010 if (!DescSS.Legacy.Gen.u1Present)
3011 {
3012 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3013 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3014 }
3015
3016 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3017 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3018
3019 /* Set the accessed bit before committing the result into SS. */
3020 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3021 {
3022 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3023 if (rcStrict != VINF_SUCCESS)
3024 return rcStrict;
3025 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3026 }
3027
3028 /* Commit SS. */
3029 pCtx->ss.Sel = uNewSS;
3030 pCtx->ss.ValidSel = uNewSS;
3031 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3032 pCtx->ss.u32Limit = cbLimit;
3033 pCtx->ss.u64Base = u64Base;
3034 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3035 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3036
3037 /* CPL has changed, update IEM before loading rest of segments. */
3038 pIemCpu->uCpl = uNewCpl;
3039
3040 /*
3041 * Load the data segments for the new task.
3042 */
3043 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3044 if (rcStrict != VINF_SUCCESS)
3045 return rcStrict;
3046 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3047 if (rcStrict != VINF_SUCCESS)
3048 return rcStrict;
3049 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3050 if (rcStrict != VINF_SUCCESS)
3051 return rcStrict;
3052 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3053 if (rcStrict != VINF_SUCCESS)
3054 return rcStrict;
3055
3056 /*
3057 * Load the code segment for the new task.
3058 */
3059 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3060 {
3061 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3062 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3063 }
3064
3065 /* Fetch the descriptor. */
3066 IEMSELDESC DescCS;
3067 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3068 if (rcStrict != VINF_SUCCESS)
3069 {
3070 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3071 return rcStrict;
3072 }
3073
3074 /* CS must be a code segment. */
3075 if ( !DescCS.Legacy.Gen.u1DescType
3076 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3077 {
3078 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3079 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3080 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3081 }
3082
3083 /* For conforming CS, DPL must be less than or equal to the RPL. */
3084 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3085 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3086 {
3087 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3088 DescCS.Legacy.Gen.u2Dpl));
3089 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3090 }
3091
3092 /* For non-conforming CS, DPL must match RPL. */
3093 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3094 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3095 {
3096 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3097 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3098 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3099 }
3100
3101 /* Is it there? */
3102 if (!DescCS.Legacy.Gen.u1Present)
3103 {
3104 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3105 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3106 }
3107
3108 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3109 u64Base = X86DESC_BASE(&DescCS.Legacy);
3110
3111 /* Set the accessed bit before committing the result into CS. */
3112 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3113 {
3114 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3115 if (rcStrict != VINF_SUCCESS)
3116 return rcStrict;
3117 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3118 }
3119
3120 /* Commit CS. */
3121 pCtx->cs.Sel = uNewCS;
3122 pCtx->cs.ValidSel = uNewCS;
3123 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3124 pCtx->cs.u32Limit = cbLimit;
3125 pCtx->cs.u64Base = u64Base;
3126 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3128 }
3129
3130 /** @todo Debug trap. */
3131 if (fIsNewTSS386 && fNewDebugTrap)
3132 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3133
3134 /*
3135 * Construct the error code masks based on what caused this task switch.
3136 * See Intel Instruction reference for INT.
3137 */
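 /* uExt becomes the EXT bit (bit 0) of any error code raised while switching:
    set for hardware interrupts and CPU exceptions, clear for software INTs. */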
3138 uint16_t uExt;
3139 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3140 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3141 {
3142 uExt = 1;
3143 }
3144 else
3145 uExt = 0;
3146
3147 /*
3148 * Push any error code on to the new stack.
3149 */
3150 if (fFlags & IEM_XCPT_FLAGS_ERR)
3151 {
3152 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3153 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3154 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3155
3156 /* Check that there is sufficient space on the stack. */
3157 /** @todo Factor out segment limit checking for normal/expand down segments
3158 * into a separate function. */
3159 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3160 {
3161 if ( pCtx->esp - 1 > cbLimitSS
3162 || pCtx->esp < cbStackFrame)
3163 {
3164 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3165 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3166 cbStackFrame));
3167 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3168 }
3169 }
3170 else
3171 {
3172 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3173 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3174 {
3175 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3176 cbStackFrame));
3177 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3178 }
3179 }
3180
3181
3182 if (fIsNewTSS386)
3183 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3184 else
3185 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3186 if (rcStrict != VINF_SUCCESS)
3187 {
3188 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3189 VBOXSTRICTRC_VAL(rcStrict)));
3190 return rcStrict;
3191 }
3192 }
3193
3194 /* Check the new EIP against the new CS limit. */
3195 if (pCtx->eip > pCtx->cs.u32Limit)
3196 {
3197 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3198 pCtx->eip, pCtx->cs.u32Limit));
3199 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3200 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3201 }
3202
3203 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3204 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3205}
3206
3207
3208/**
3209 * Implements exceptions and interrupts for protected mode.
3210 *
3211 * @returns VBox strict status code.
3212 * @param pIemCpu The IEM per CPU instance data.
3213 * @param pCtx The CPU context.
3214 * @param cbInstr The number of bytes to offset rIP by in the return
3215 * address.
3216 * @param u8Vector The interrupt / exception vector number.
3217 * @param fFlags The flags.
3218 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3219 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3220 */
3221IEM_STATIC VBOXSTRICTRC
3222iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3223 PCPUMCTX pCtx,
3224 uint8_t cbInstr,
3225 uint8_t u8Vector,
3226 uint32_t fFlags,
3227 uint16_t uErr,
3228 uint64_t uCr2)
3229{
3230 /*
3231 * Read the IDT entry.
3232 */
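 /* Protected-mode IDT entries are 8 bytes each, so the limit must cover
    offset 8*N + 7 for vector N. */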
3233 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3234 {
3235 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3236 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3237 }
3238 X86DESC Idte;
3239 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3240 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3241 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3242 return rcStrict;
3243 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3244 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3245 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3246
3247 /*
3248 * Check the descriptor type, DPL and such.
3249 * ASSUMES this is done in the same order as described for call-gate calls.
3250 */
3251 if (Idte.Gate.u1DescType)
3252 {
3253 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3254 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3255 }
3256 bool fTaskGate = false;
3257 uint8_t f32BitGate = true;
3258 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3259 switch (Idte.Gate.u4Type)
3260 {
3261 case X86_SEL_TYPE_SYS_UNDEFINED:
3262 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3263 case X86_SEL_TYPE_SYS_LDT:
3264 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3265 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3266 case X86_SEL_TYPE_SYS_UNDEFINED2:
3267 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3268 case X86_SEL_TYPE_SYS_UNDEFINED3:
3269 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3270 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3271 case X86_SEL_TYPE_SYS_UNDEFINED4:
3272 {
3273 /** @todo check what actually happens when the type is wrong...
3274 * esp. call gates. */
3275 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3276 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3277 }
3278
3279 case X86_SEL_TYPE_SYS_286_INT_GATE:
3280 f32BitGate = false;
3281 case X86_SEL_TYPE_SYS_386_INT_GATE:
3282 fEflToClear |= X86_EFL_IF;
3283 break;
3284
3285 case X86_SEL_TYPE_SYS_TASK_GATE:
3286 fTaskGate = true;
3287#ifndef IEM_IMPLEMENTS_TASKSWITCH
3288 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3289#endif
3290 break;
3291
3292 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3293 f32BitGate = false;
3294 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3295 break;
3296
3297 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3298 }
3299
3300 /* Check DPL against CPL if applicable. */
3301 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3302 {
3303 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3304 {
3305 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3306 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3307 }
3308 }
3309
3310 /* Is it there? */
3311 if (!Idte.Gate.u1Present)
3312 {
3313 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3314 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3315 }
3316
3317 /* Is it a task-gate? */
3318 if (fTaskGate)
3319 {
3320 /*
3321 * Construct the error code masks based on what caused this task switch.
3322 * See Intel Instruction reference for INT.
3323 */
3324 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3325 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3326 RTSEL SelTSS = Idte.Gate.u16Sel;
3327
3328 /*
3329 * Fetch the TSS descriptor in the GDT.
3330 */
3331 IEMSELDESC DescTSS;
3332 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3333 if (rcStrict != VINF_SUCCESS)
3334 {
3335 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3336 VBOXSTRICTRC_VAL(rcStrict)));
3337 return rcStrict;
3338 }
3339
3340 /* The TSS descriptor must be a system segment and be available (not busy). */
3341 if ( DescTSS.Legacy.Gen.u1DescType
3342 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3343 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3344 {
3345 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3346 u8Vector, SelTSS, DescTSS.Legacy.au64));
3347 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3348 }
3349
3350 /* The TSS must be present. */
3351 if (!DescTSS.Legacy.Gen.u1Present)
3352 {
3353 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3354 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3355 }
3356
3357 /* Do the actual task switch. */
3358 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3359 }
3360
3361 /* A null CS is bad. */
3362 RTSEL NewCS = Idte.Gate.u16Sel;
3363 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3364 {
3365 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3366 return iemRaiseGeneralProtectionFault0(pIemCpu);
3367 }
3368
3369 /* Fetch the descriptor for the new CS. */
3370 IEMSELDESC DescCS;
3371 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3372 if (rcStrict != VINF_SUCCESS)
3373 {
3374 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3375 return rcStrict;
3376 }
3377
3378 /* Must be a code segment. */
3379 if (!DescCS.Legacy.Gen.u1DescType)
3380 {
3381 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3382 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3383 }
3384 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3385 {
3386 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3387 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3388 }
3389
3390 /* Don't allow lowering the privilege level. */
3391 /** @todo Does the lowering of privileges apply to software interrupts
3392 * only? This has a bearing on the more-privileged or
3393 * same-privilege stack behavior further down. A testcase would
3394 * be nice. */
3395 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3396 {
3397 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3398 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3399 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3400 }
3401
3402 /* Make sure the selector is present. */
3403 if (!DescCS.Legacy.Gen.u1Present)
3404 {
3405 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3406 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3407 }
3408
3409 /* Check the new EIP against the new CS limit. */
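 /* 286 interrupt/trap gates only carry a 16-bit offset; 386 gates combine
    the low and high offset words. */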
3410 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3411 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3412 ? Idte.Gate.u16OffsetLow
3413 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3414 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3415 if (uNewEip > cbLimitCS)
3416 {
3417 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3418 u8Vector, uNewEip, cbLimitCS, NewCS));
3419 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3420 }
3421
3422 /* Calc the flag image to push. */
3423 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3424 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3425 fEfl &= ~X86_EFL_RF;
3426 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3427 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3428
3429 /* From V8086 mode only go to CPL 0. */
3430 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3431 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3432 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3433 {
3434 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3435 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3436 }
3437
3438 /*
3439 * If the privilege level changes, we need to get a new stack from the TSS.
3440 * This in turns means validating the new SS and ESP...
3441 */
3442 if (uNewCpl != pIemCpu->uCpl)
3443 {
3444 RTSEL NewSS;
3445 uint32_t uNewEsp;
3446 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3447 if (rcStrict != VINF_SUCCESS)
3448 return rcStrict;
3449
3450 IEMSELDESC DescSS;
3451 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3452 if (rcStrict != VINF_SUCCESS)
3453 return rcStrict;
3454
3455 /* Check that there is sufficient space for the stack frame. */
3456 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
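 /* Frame layout: EIP, CS, EFLAGS, old ESP and old SS (5 entries, +1 if an
    error code is pushed), 2 bytes each for a 16-bit gate and doubled for a
    32-bit gate; a V86 interruption additionally saves ES, DS, FS and GS. */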
3457 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3458 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3459 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3460
3461 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3462 {
3463 if ( uNewEsp - 1 > cbLimitSS
3464 || uNewEsp < cbStackFrame)
3465 {
3466 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3467 u8Vector, NewSS, uNewEsp, cbStackFrame));
3468 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3469 }
3470 }
3471 else
3472 {
3473 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3474 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3475 {
3476 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3477 u8Vector, NewSS, uNewEsp, cbStackFrame));
3478 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3479 }
3480 }
3481
3482 /*
3483 * Start making changes.
3484 */
3485
3486 /* Create the stack frame. */
3487 RTPTRUNION uStackFrame;
3488 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3489 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3490 if (rcStrict != VINF_SUCCESS)
3491 return rcStrict;
3492 void * const pvStackFrame = uStackFrame.pv;
3493 if (f32BitGate)
3494 {
3495 if (fFlags & IEM_XCPT_FLAGS_ERR)
3496 *uStackFrame.pu32++ = uErr;
3497 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3498 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3499 uStackFrame.pu32[2] = fEfl;
3500 uStackFrame.pu32[3] = pCtx->esp;
3501 uStackFrame.pu32[4] = pCtx->ss.Sel;
3502 if (fEfl & X86_EFL_VM)
3503 {
3504 uStackFrame.pu32[1] = pCtx->cs.Sel;
3505 uStackFrame.pu32[5] = pCtx->es.Sel;
3506 uStackFrame.pu32[6] = pCtx->ds.Sel;
3507 uStackFrame.pu32[7] = pCtx->fs.Sel;
3508 uStackFrame.pu32[8] = pCtx->gs.Sel;
3509 }
3510 }
3511 else
3512 {
3513 if (fFlags & IEM_XCPT_FLAGS_ERR)
3514 *uStackFrame.pu16++ = uErr;
3515 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3516 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3517 uStackFrame.pu16[2] = fEfl;
3518 uStackFrame.pu16[3] = pCtx->sp;
3519 uStackFrame.pu16[4] = pCtx->ss.Sel;
3520 if (fEfl & X86_EFL_VM)
3521 {
3522 uStackFrame.pu16[1] = pCtx->cs.Sel;
3523 uStackFrame.pu16[5] = pCtx->es.Sel;
3524 uStackFrame.pu16[6] = pCtx->ds.Sel;
3525 uStackFrame.pu16[7] = pCtx->fs.Sel;
3526 uStackFrame.pu16[8] = pCtx->gs.Sel;
3527 }
3528 }
3529 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3530 if (rcStrict != VINF_SUCCESS)
3531 return rcStrict;
3532
3533 /* Mark the selectors 'accessed' (hope this is the correct time). */
3534 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3535 * after pushing the stack frame? (Write protect the gdt + stack to
3536 * find out.) */
3537 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3538 {
3539 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3540 if (rcStrict != VINF_SUCCESS)
3541 return rcStrict;
3542 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3543 }
3544
3545 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3546 {
3547 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3548 if (rcStrict != VINF_SUCCESS)
3549 return rcStrict;
3550 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3551 }
3552
3553 /*
3554 * Start committing the register changes (joins with the DPL=CPL branch).
3555 */
3556 pCtx->ss.Sel = NewSS;
3557 pCtx->ss.ValidSel = NewSS;
3558 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3559 pCtx->ss.u32Limit = cbLimitSS;
3560 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3561 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3562 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3563 pIemCpu->uCpl = uNewCpl;
3564
3565 if (fEfl & X86_EFL_VM)
3566 {
3567 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3568 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3569 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3570 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3571 }
3572 }
3573 /*
3574 * Same privilege, no stack change and smaller stack frame.
3575 */
3576 else
3577 {
3578 uint64_t uNewRsp;
3579 RTPTRUNION uStackFrame;
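 /* Same-privilege frame: just EIP, CS and EFLAGS (+ optional error code),
    2 bytes each for a 16-bit gate, doubled for a 32-bit one. */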
3580 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3581 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3582 if (rcStrict != VINF_SUCCESS)
3583 return rcStrict;
3584 void * const pvStackFrame = uStackFrame.pv;
3585
3586 if (f32BitGate)
3587 {
3588 if (fFlags & IEM_XCPT_FLAGS_ERR)
3589 *uStackFrame.pu32++ = uErr;
3590 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3591 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3592 uStackFrame.pu32[2] = fEfl;
3593 }
3594 else
3595 {
3596 if (fFlags & IEM_XCPT_FLAGS_ERR)
3597 *uStackFrame.pu16++ = uErr;
3598 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3599 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3600 uStackFrame.pu16[2] = fEfl;
3601 }
3602 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3603 if (rcStrict != VINF_SUCCESS)
3604 return rcStrict;
3605
3606 /* Mark the CS selector as 'accessed'. */
3607 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3608 {
3609 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3610 if (rcStrict != VINF_SUCCESS)
3611 return rcStrict;
3612 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3613 }
3614
3615 /*
3616 * Start committing the register changes (joins with the other branch).
3617 */
3618 pCtx->rsp = uNewRsp;
3619 }
3620
3621 /* ... register committing continues. */
3622 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3623 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3624 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3625 pCtx->cs.u32Limit = cbLimitCS;
3626 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3627 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3628
3629 pCtx->rip = uNewEip;
3630 fEfl &= ~fEflToClear;
3631 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3632
3633 if (fFlags & IEM_XCPT_FLAGS_CR2)
3634 pCtx->cr2 = uCr2;
3635
3636 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3637 iemRaiseXcptAdjustState(pCtx, u8Vector);
3638
3639 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3640}
3641
3642
3643/**
3644 * Implements exceptions and interrupts for long mode.
3645 *
3646 * @returns VBox strict status code.
3647 * @param pIemCpu The IEM per CPU instance data.
3648 * @param pCtx The CPU context.
3649 * @param cbInstr The number of bytes to offset rIP by in the return
3650 * address.
3651 * @param u8Vector The interrupt / exception vector number.
3652 * @param fFlags The flags.
3653 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3654 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3655 */
3656IEM_STATIC VBOXSTRICTRC
3657iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3658 PCPUMCTX pCtx,
3659 uint8_t cbInstr,
3660 uint8_t u8Vector,
3661 uint32_t fFlags,
3662 uint16_t uErr,
3663 uint64_t uCr2)
3664{
3665 /*
3666 * Read the IDT entry.
3667 */
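 /* Long-mode IDT entries are 16 bytes, hence the shift by 4 above; the gate
    is fetched as two 8-byte reads below. */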
3668 uint16_t offIdt = (uint16_t)u8Vector << 4;
3669 if (pCtx->idtr.cbIdt < offIdt + 7)
3670 {
3671 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3672 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3673 }
3674 X86DESC64 Idte;
3675 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3676 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3677 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3678 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3679 return rcStrict;
3680 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3681 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3682 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3683
3684 /*
3685 * Check the descriptor type, DPL and such.
3686 * ASSUMES this is done in the same order as described for call-gate calls.
3687 */
3688 if (Idte.Gate.u1DescType)
3689 {
3690 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3691 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3692 }
3693 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3694 switch (Idte.Gate.u4Type)
3695 {
3696 case AMD64_SEL_TYPE_SYS_INT_GATE:
3697 fEflToClear |= X86_EFL_IF;
3698 break;
3699 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3700 break;
3701
3702 default:
3703 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3704 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3705 }
3706
3707 /* Check DPL against CPL if applicable. */
3708 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3709 {
3710 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3711 {
3712 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3713 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3714 }
3715 }
3716
3717 /* Is it there? */
3718 if (!Idte.Gate.u1Present)
3719 {
3720 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3721 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3722 }
3723
3724 /* A null CS is bad. */
3725 RTSEL NewCS = Idte.Gate.u16Sel;
3726 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3727 {
3728 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3729 return iemRaiseGeneralProtectionFault0(pIemCpu);
3730 }
3731
3732 /* Fetch the descriptor for the new CS. */
3733 IEMSELDESC DescCS;
3734 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3735 if (rcStrict != VINF_SUCCESS)
3736 {
3737 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3738 return rcStrict;
3739 }
3740
3741 /* Must be a 64-bit code segment. */
3742 if (!DescCS.Long.Gen.u1DescType)
3743 {
3744 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3745 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3746 }
3747 if ( !DescCS.Long.Gen.u1Long
3748 || DescCS.Long.Gen.u1DefBig
3749 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3750 {
3751 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3752 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3753 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3754 }
3755
3756 /* Don't allow lowering the privilege level. For non-conforming CS
3757 selectors, the CS.DPL sets the privilege level the trap/interrupt
3758 handler runs at. For conforming CS selectors, the CPL remains
3759 unchanged, but the CS.DPL must be <= CPL. */
3760 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3761 * when CPU in Ring-0. Result \#GP? */
3762 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3763 {
3764 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3765 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3766 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3767 }
3768
3769
3770 /* Make sure the selector is present. */
3771 if (!DescCS.Legacy.Gen.u1Present)
3772 {
3773 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3774 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3775 }
3776
3777 /* Check that the new RIP is canonical. */
3778 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3779 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3780 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3781 if (!IEM_IS_CANONICAL(uNewRip))
3782 {
3783 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3784 return iemRaiseGeneralProtectionFault0(pIemCpu);
3785 }
3786
3787 /*
3788 * If the privilege level changes or if the IST isn't zero, we need to get
3789 * a new stack from the TSS.
3790 */
3791 uint64_t uNewRsp;
3792 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3793 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3794 if ( uNewCpl != pIemCpu->uCpl
3795 || Idte.Gate.u3IST != 0)
3796 {
3797 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3798 if (rcStrict != VINF_SUCCESS)
3799 return rcStrict;
3800 }
3801 else
3802 uNewRsp = pCtx->rsp;
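 /* In 64-bit mode the stack pointer is aligned down to a 16-byte boundary
    before the frame is pushed. */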
3803 uNewRsp &= ~(uint64_t)0xf;
3804
3805 /*
3806 * Calc the flag image to push.
3807 */
3808 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3809 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3810 fEfl &= ~X86_EFL_RF;
3811 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3812 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3813
3814 /*
3815 * Start making changes.
3816 */
3817
3818 /* Create the stack frame. */
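 /* SS, RSP, RFLAGS, CS and RIP are always pushed (5 qwords), plus one more
    qword for the error code when applicable. */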
3819 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3820 RTPTRUNION uStackFrame;
3821 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3822 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3823 if (rcStrict != VINF_SUCCESS)
3824 return rcStrict;
3825 void * const pvStackFrame = uStackFrame.pv;
3826
3827 if (fFlags & IEM_XCPT_FLAGS_ERR)
3828 *uStackFrame.pu64++ = uErr;
3829 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3830 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3831 uStackFrame.pu64[2] = fEfl;
3832 uStackFrame.pu64[3] = pCtx->rsp;
3833 uStackFrame.pu64[4] = pCtx->ss.Sel;
3834 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3835 if (rcStrict != VINF_SUCCESS)
3836 return rcStrict;
3837
3838 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3839 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3840 * after pushing the stack frame? (Write protect the gdt + stack to
3841 * find out.) */
3842 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3843 {
3844 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3845 if (rcStrict != VINF_SUCCESS)
3846 return rcStrict;
3847 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3848 }
3849
3850 /*
3851 * Start committing the register changes.
3852 */
3853 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3854 * hidden registers when interrupting 32-bit or 16-bit code! */
3855 if (uNewCpl != pIemCpu->uCpl)
3856 {
3857 pCtx->ss.Sel = 0 | uNewCpl;
3858 pCtx->ss.ValidSel = 0 | uNewCpl;
3859 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3860 pCtx->ss.u32Limit = UINT32_MAX;
3861 pCtx->ss.u64Base = 0;
3862 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3863 }
3864 pCtx->rsp = uNewRsp - cbStackFrame;
3865 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3866 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3867 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3868 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3869 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3870 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3871 pCtx->rip = uNewRip;
3872 pIemCpu->uCpl = uNewCpl;
3873
3874 fEfl &= ~fEflToClear;
3875 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3876
3877 if (fFlags & IEM_XCPT_FLAGS_CR2)
3878 pCtx->cr2 = uCr2;
3879
3880 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3881 iemRaiseXcptAdjustState(pCtx, u8Vector);
3882
3883 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3884}
3885
3886
3887/**
3888 * Implements exceptions and interrupts.
3889 *
3890 * All exceptions and interrupts go through this function!
3891 *
3892 * @returns VBox strict status code.
3893 * @param pIemCpu The IEM per CPU instance data.
3894 * @param cbInstr The number of bytes to offset rIP by in the return
3895 * address.
3896 * @param u8Vector The interrupt / exception vector number.
3897 * @param fFlags The flags.
3898 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3899 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3900 */
3901DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3902iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3903 uint8_t cbInstr,
3904 uint8_t u8Vector,
3905 uint32_t fFlags,
3906 uint16_t uErr,
3907 uint64_t uCr2)
3908{
3909 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3910#ifdef IN_RING0
3911 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3912 AssertRCReturn(rc, rc);
3913#endif
3914
3915 /*
3916 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3917 */
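 /* With IOPL < 3, a software INT n in V86 mode is converted into a #GP(0)
    here rather than being dispatched through the IDT. */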
3918 if ( pCtx->eflags.Bits.u1VM
3919 && pCtx->eflags.Bits.u2IOPL != 3
3920 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3921 && (pCtx->cr0 & X86_CR0_PE) )
3922 {
3923 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3924 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3925 u8Vector = X86_XCPT_GP;
3926 uErr = 0;
3927 }
3928#ifdef DBGFTRACE_ENABLED
3929 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3930 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3931 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3932#endif
3933
3934 /*
3935 * Do recursion accounting.
3936 */
3937 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3938 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3939 if (pIemCpu->cXcptRecursions == 0)
3940 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3941 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3942 else
3943 {
3944 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3945 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3946
3947 /** @todo double and triple faults. */
3948 if (pIemCpu->cXcptRecursions >= 3)
3949 {
3950#ifdef DEBUG_bird
3951 AssertFailed();
3952#endif
3953 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3954 }
3955
3956 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3957 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3958 {
3959 ....
3960 } */
3961 }
3962 pIemCpu->cXcptRecursions++;
3963 pIemCpu->uCurXcpt = u8Vector;
3964 pIemCpu->fCurXcpt = fFlags;
3965
3966 /*
3967 * Extensive logging.
3968 */
3969#if defined(LOG_ENABLED) && defined(IN_RING3)
3970 if (LogIs3Enabled())
3971 {
3972 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3973 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3974 char szRegs[4096];
3975 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3976 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3977 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3978 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3979 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3980 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3981 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3982 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3983 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3984 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3985 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3986 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3987 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3988 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3989 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3990 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3991 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3992 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3993 " efer=%016VR{efer}\n"
3994 " pat=%016VR{pat}\n"
3995 " sf_mask=%016VR{sf_mask}\n"
3996 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3997 " lstar=%016VR{lstar}\n"
3998 " star=%016VR{star} cstar=%016VR{cstar}\n"
3999 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4000 );
4001
4002 char szInstr[256];
4003 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4004 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4005 szInstr, sizeof(szInstr), NULL);
4006 Log3(("%s%s\n", szRegs, szInstr));
4007 }
4008#endif /* LOG_ENABLED */
4009
4010 /*
4011 * Call the mode specific worker function.
4012 */
4013 VBOXSTRICTRC rcStrict;
4014 if (!(pCtx->cr0 & X86_CR0_PE))
4015 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4016 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4017 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4018 else
4019 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4020
4021 /*
4022 * Unwind.
4023 */
4024 pIemCpu->cXcptRecursions--;
4025 pIemCpu->uCurXcpt = uPrevXcpt;
4026 pIemCpu->fCurXcpt = fPrevXcpt;
4027 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4028 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4029 return rcStrict;
4030}
4031
4032
4033/** \#DE - 00. */
4034DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4035{
4036 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4037}
4038
4039
4040/** \#DB - 01.
4041 * @note This automatically clears DR7.GD. */
4042DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4043{
4044 /** @todo set/clear RF. */
4045 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4046 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4047}
4048
4049
4050/** \#UD - 06. */
4051DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4052{
4053 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4054}
4055
4056
4057/** \#NM - 07. */
4058DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4059{
4060 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4061}
4062
4063
4064/** \#TS(err) - 0a. */
4065DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4066{
4067 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4068}
4069
4070
4071/** \#TS(tr) - 0a. */
4072DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4073{
4074 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4075 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4076}
4077
4078
4079/** \#TS(0) - 0a. */
4080DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4081{
4082 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4083 0, 0);
4084}
4085
4086
4087/** \#TS(err) - 0a. */
4088DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4089{
4090 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4091 uSel & X86_SEL_MASK_OFF_RPL, 0);
4092}
4093
4094
4095/** \#NP(err) - 0b. */
4096DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4097{
4098 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4099}
4100
4101
4102/** \#NP(seg) - 0b. */
4103DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4104{
4105 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4106 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4107}
4108
4109
4110/** \#NP(sel) - 0b. */
4111DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4112{
4113 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4114 uSel & ~X86_SEL_RPL, 0);
4115}
4116
4117
4118/** \#SS(seg) - 0c. */
4119DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4120{
4121 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4122 uSel & ~X86_SEL_RPL, 0);
4123}
4124
4125
4126/** \#SS(err) - 0c. */
4127DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4128{
4129 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4130}
4131
4132
4133/** \#GP(n) - 0d. */
4134DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4135{
4136 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4137}
4138
4139
4140/** \#GP(0) - 0d. */
4141DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4142{
4143 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4144}
4145
4146
4147/** \#GP(sel) - 0d. */
4148DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4149{
4150 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4151 Sel & ~X86_SEL_RPL, 0);
4152}
4153
4154
4155/** \#GP(0) - 0d. */
4156DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4157{
4158 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4159}
4160
4161
4162/** \#GP(sel) - 0d. */
4163DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4164{
4165 NOREF(iSegReg); NOREF(fAccess);
4166 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4167 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4168}
4169
4170
4171/** \#GP(sel) - 0d. */
4172DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4173{
4174 NOREF(Sel);
4175 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4176}
4177
4178
4179/** \#GP(sel) - 0d. */
4180DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4181{
4182 NOREF(iSegReg); NOREF(fAccess);
4183 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4184}
4185
4186
4187/** \#PF(n) - 0e. */
4188DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4189{
4190 uint16_t uErr;
4191 switch (rc)
4192 {
4193 case VERR_PAGE_NOT_PRESENT:
4194 case VERR_PAGE_TABLE_NOT_PRESENT:
4195 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4196 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4197 uErr = 0;
4198 break;
4199
4200 default:
4201 AssertMsgFailed(("%Rrc\n", rc));
4202 case VERR_ACCESS_DENIED:
4203 uErr = X86_TRAP_PF_P;
4204 break;
4205
4206 /** @todo reserved */
4207 }
4208
4209 if (pIemCpu->uCpl == 3)
4210 uErr |= X86_TRAP_PF_US;
4211
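 /* The instruction-fetch (ID) bit is only reported when no-execute paging is
    in effect, i.e. both CR4.PAE and EFER.NXE are set. */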
4212 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4213 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4214 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4215 uErr |= X86_TRAP_PF_ID;
4216
4217#if 0 /* This is so much non-sense, really. Why was it done like that? */
4218 /* Note! RW access callers reporting a WRITE protection fault, will clear
4219 the READ flag before calling. So, read-modify-write accesses (RW)
4220 can safely be reported as READ faults. */
4221 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4222 uErr |= X86_TRAP_PF_RW;
4223#else
4224 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4225 {
4226 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4227 uErr |= X86_TRAP_PF_RW;
4228 }
4229#endif
4230
4231 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4232 uErr, GCPtrWhere);
4233}
4234
4235
4236/** \#MF(0) - 10. */
4237DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4238{
4239 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4240}
4241
4242
4243/** \#AC(0) - 11. */
4244DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4245{
4246 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4247}
4248
4249
4250/**
4251 * Macro for calling iemCImplRaiseDivideError().
4252 *
4253 * This enables us to add/remove arguments and force different levels of
4254 * inlining as we wish.
4255 *
4256 * @return Strict VBox status code.
4257 */
4258#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4259IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4260{
4261 NOREF(cbInstr);
4262 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4263}
4264
4265
4266/**
4267 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4268 *
4269 * This enables us to add/remove arguments and force different levels of
4270 * inlining as we wish.
4271 *
4272 * @return Strict VBox status code.
4273 */
4274#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4275IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4276{
4277 NOREF(cbInstr);
4278 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4279}
4280
4281
4282/**
4283 * Macro for calling iemCImplRaiseInvalidOpcode().
4284 *
4285 * This enables us to add/remove arguments and force different levels of
4286 * inlining as we wish.
4287 *
4288 * @return Strict VBox status code.
4289 */
4290#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4291IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4292{
4293 NOREF(cbInstr);
4294 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4295}
4296
4297
4298/** @} */
4299
4300
4301/*
4302 *
4303 * Helper routines.
4304 * Helper routines.
4305 * Helper routines.
4306 *
4307 */
4308
4309/**
4310 * Recalculates the effective operand size.
4311 *
4312 * @param pIemCpu The IEM state.
4313 */
4314IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4315{
4316 switch (pIemCpu->enmCpuMode)
4317 {
4318 case IEMMODE_16BIT:
4319 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4320 break;
4321 case IEMMODE_32BIT:
4322 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4323 break;
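 /* In 64-bit mode REX.W takes precedence over the 0x66 prefix; 0x66 alone
    selects 16-bit, otherwise the instruction's default size applies. */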
4324 case IEMMODE_64BIT:
4325 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4326 {
4327 case 0:
4328 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4329 break;
4330 case IEM_OP_PRF_SIZE_OP:
4331 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4332 break;
4333 case IEM_OP_PRF_SIZE_REX_W:
4334 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4335 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4336 break;
4337 }
4338 break;
4339 default:
4340 AssertFailed();
4341 }
4342}
4343
4344
4345/**
4346 * Sets the default operand size to 64-bit and recalculates the effective
4347 * operand size.
4348 *
4349 * @param pIemCpu The IEM state.
4350 */
4351IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4352{
4353 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4354 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4355 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4356 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4357 else
4358 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4359}
4360
4361
4362/*
4363 *
4364 * Common opcode decoders.
4365 * Common opcode decoders.
4366 * Common opcode decoders.
4367 *
4368 */
4369//#include <iprt/mem.h>
4370
4371/**
4372 * Used to add extra details about a stub case.
4373 * @param pIemCpu The IEM per CPU state.
4374 */
4375IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4376{
4377#if defined(LOG_ENABLED) && defined(IN_RING3)
4378 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4379 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4380 char szRegs[4096];
4381 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4382 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4383 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4384 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4385 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4386 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4387 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4388 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4389 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4390 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4391 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4392 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4393 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4394 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4395 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4396 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4397 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4398 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4399 " efer=%016VR{efer}\n"
4400 " pat=%016VR{pat}\n"
4401 " sf_mask=%016VR{sf_mask}\n"
4402 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4403 " lstar=%016VR{lstar}\n"
4404 " star=%016VR{star} cstar=%016VR{cstar}\n"
4405 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4406 );
4407
4408 char szInstr[256];
4409 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4410 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4411 szInstr, sizeof(szInstr), NULL);
4412
4413 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4414#else
4415 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4416#endif
4417}
4418
4419/**
4420 * Complains about a stub.
4421 *
4422 * Providing two versions of this macro, one for daily use and one for use when
4423 * working on IEM.
4424 */
4425#if 0
4426# define IEMOP_BITCH_ABOUT_STUB() \
4427 do { \
4428 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4429 iemOpStubMsg2(pIemCpu); \
4430 RTAssertPanic(); \
4431 } while (0)
4432#else
4433# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4434#endif
4435
4436/** Stubs an opcode. */
4437#define FNIEMOP_STUB(a_Name) \
4438 FNIEMOP_DEF(a_Name) \
4439 { \
4440 IEMOP_BITCH_ABOUT_STUB(); \
4441 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4442 } \
4443 typedef int ignore_semicolon
4444
4445/** Stubs an opcode. */
4446#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4447 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4448 { \
4449 IEMOP_BITCH_ABOUT_STUB(); \
4450 NOREF(a_Name0); \
4451 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4452 } \
4453 typedef int ignore_semicolon
4454
4455/** Stubs an opcode which currently should raise \#UD. */
4456#define FNIEMOP_UD_STUB(a_Name) \
4457 FNIEMOP_DEF(a_Name) \
4458 { \
4459 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4460 return IEMOP_RAISE_INVALID_OPCODE(); \
4461 } \
4462 typedef int ignore_semicolon
4463
4464/** Stubs an opcode which currently should raise \#UD. */
4465#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4466 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4467 { \
4468 NOREF(a_Name0); \
4469 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4470 return IEMOP_RAISE_INVALID_OPCODE(); \
4471 } \
4472 typedef int ignore_semicolon
4473
4474
4475
4476/** @name Register Access.
4477 * @{
4478 */
4479
4480/**
4481 * Gets a reference (pointer) to the specified hidden segment register.
4482 *
4483 * @returns Hidden register reference.
4484 * @param pIemCpu The per CPU data.
4485 * @param iSegReg The segment register.
4486 */
4487IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4488{
4489 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4490 PCPUMSELREG pSReg;
4491 switch (iSegReg)
4492 {
4493 case X86_SREG_ES: pSReg = &pCtx->es; break;
4494 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4495 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4496 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4497 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4498 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4499 default:
4500 AssertFailedReturn(NULL);
4501 }
4502#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4503 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4504 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4505#else
4506 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4507#endif
4508 return pSReg;
4509}
4510
4511
4512/**
4513 * Gets a reference (pointer) to the specified segment register (the selector
4514 * value).
4515 *
4516 * @returns Pointer to the selector variable.
4517 * @param pIemCpu The per CPU data.
4518 * @param iSegReg The segment register.
4519 */
4520IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4521{
4522 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4523 switch (iSegReg)
4524 {
4525 case X86_SREG_ES: return &pCtx->es.Sel;
4526 case X86_SREG_CS: return &pCtx->cs.Sel;
4527 case X86_SREG_SS: return &pCtx->ss.Sel;
4528 case X86_SREG_DS: return &pCtx->ds.Sel;
4529 case X86_SREG_FS: return &pCtx->fs.Sel;
4530 case X86_SREG_GS: return &pCtx->gs.Sel;
4531 }
4532 AssertFailedReturn(NULL);
4533}
4534
4535
4536/**
4537 * Fetches the selector value of a segment register.
4538 *
4539 * @returns The selector value.
4540 * @param pIemCpu The per CPU data.
4541 * @param iSegReg The segment register.
4542 */
4543IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4544{
4545 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4546 switch (iSegReg)
4547 {
4548 case X86_SREG_ES: return pCtx->es.Sel;
4549 case X86_SREG_CS: return pCtx->cs.Sel;
4550 case X86_SREG_SS: return pCtx->ss.Sel;
4551 case X86_SREG_DS: return pCtx->ds.Sel;
4552 case X86_SREG_FS: return pCtx->fs.Sel;
4553 case X86_SREG_GS: return pCtx->gs.Sel;
4554 }
4555 AssertFailedReturn(0xffff);
4556}
4557
4558
4559/**
4560 * Gets a reference (pointer) to the specified general register.
4561 *
4562 * @returns Register reference.
4563 * @param pIemCpu The per CPU data.
4564 * @param iReg The general register.
4565 */
4566IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4567{
4568 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4569 switch (iReg)
4570 {
4571 case X86_GREG_xAX: return &pCtx->rax;
4572 case X86_GREG_xCX: return &pCtx->rcx;
4573 case X86_GREG_xDX: return &pCtx->rdx;
4574 case X86_GREG_xBX: return &pCtx->rbx;
4575 case X86_GREG_xSP: return &pCtx->rsp;
4576 case X86_GREG_xBP: return &pCtx->rbp;
4577 case X86_GREG_xSI: return &pCtx->rsi;
4578 case X86_GREG_xDI: return &pCtx->rdi;
4579 case X86_GREG_x8: return &pCtx->r8;
4580 case X86_GREG_x9: return &pCtx->r9;
4581 case X86_GREG_x10: return &pCtx->r10;
4582 case X86_GREG_x11: return &pCtx->r11;
4583 case X86_GREG_x12: return &pCtx->r12;
4584 case X86_GREG_x13: return &pCtx->r13;
4585 case X86_GREG_x14: return &pCtx->r14;
4586 case X86_GREG_x15: return &pCtx->r15;
4587 }
4588 AssertFailedReturn(NULL);
4589}
4590
4591
4592/**
4593 * Gets a reference (pointer) to the specified 8-bit general register.
4594 *
4595 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4596 *
4597 * @returns Register reference.
4598 * @param pIemCpu The per CPU data.
4599 * @param iReg The register.
4600 */
4601IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4602{
4603 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4604 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4605
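 /* Without a REX prefix, encodings 4 thru 7 select AH, CH, DH and BH, i.e.
    the second byte of the corresponding low word register. */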
4606 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4607 if (iReg >= 4)
4608 pu8Reg++;
4609 return pu8Reg;
4610}
4611
4612
4613/**
4614 * Fetches the value of an 8-bit general register.
4615 *
4616 * @returns The register value.
4617 * @param pIemCpu The per CPU data.
4618 * @param iReg The register.
4619 */
4620IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4621{
4622 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4623 return *pbSrc;
4624}
4625
4626
4627/**
4628 * Fetches the value of a 16-bit general register.
4629 *
4630 * @returns The register value.
4631 * @param pIemCpu The per CPU data.
4632 * @param iReg The register.
4633 */
4634IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4635{
4636 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4637}
4638
4639
4640/**
4641 * Fetches the value of a 32-bit general register.
4642 *
4643 * @returns The register value.
4644 * @param pIemCpu The per CPU data.
4645 * @param iReg The register.
4646 */
4647IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4648{
4649 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4650}
4651
4652
4653/**
4654 * Fetches the value of a 64-bit general register.
4655 *
4656 * @returns The register value.
4657 * @param pIemCpu The per CPU data.
4658 * @param iReg The register.
4659 */
4660IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4661{
4662 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4663}
4664
4665
4666/**
4667 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4668 *
4669 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4670 * segment limit.
4671 *
 * @returns Strict VBox status code.
4672 * @param pIemCpu The per CPU data.
4673 * @param offNextInstr The offset of the next instruction.
4674 */
4675IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4676{
4677 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4678 switch (pIemCpu->enmEffOpSize)
4679 {
4680 case IEMMODE_16BIT:
4681 {
4682 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4683 if ( uNewIp > pCtx->cs.u32Limit
4684 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4685 return iemRaiseGeneralProtectionFault0(pIemCpu);
4686 pCtx->rip = uNewIp;
4687 break;
4688 }
4689
4690 case IEMMODE_32BIT:
4691 {
4692 Assert(pCtx->rip <= UINT32_MAX);
4693 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4694
4695 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4696 if (uNewEip > pCtx->cs.u32Limit)
4697 return iemRaiseGeneralProtectionFault0(pIemCpu);
4698 pCtx->rip = uNewEip;
4699 break;
4700 }
4701
4702 case IEMMODE_64BIT:
4703 {
4704 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4705
4706 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4707 if (!IEM_IS_CANONICAL(uNewRip))
4708 return iemRaiseGeneralProtectionFault0(pIemCpu);
4709 pCtx->rip = uNewRip;
4710 break;
4711 }
4712
4713 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4714 }
4715
4716 pCtx->eflags.Bits.u1RF = 0;
4717 return VINF_SUCCESS;
4718}
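
/*
 * Worked example with hypothetical values for the 16-bit case above: with
 * IP=0x1000, a two byte JMP short (offOpcode == 2) and offNextInstr == -2,
 * the new IP is 0x1000 + (-2) + 2 = 0x1000, i.e. the instruction jumps to
 * itself. The target is validated (CS limit, or canonical check in 64-bit
 * mode) before RIP is updated, so a bad target raises #GP(0) without
 * advancing RIP.
 */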
4719
4720
4721/**
4722 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4723 *
4724 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4725 * segment limit.
4726 *
4727 * @returns Strict VBox status code.
4728 * @param pIemCpu The per CPU data.
4729 * @param offNextInstr The offset of the next instruction.
4730 */
4731IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4732{
4733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4734 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4735
4736 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4737 if ( uNewIp > pCtx->cs.u32Limit
4738 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4739 return iemRaiseGeneralProtectionFault0(pIemCpu);
4740 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4741 pCtx->rip = uNewIp;
4742 pCtx->eflags.Bits.u1RF = 0;
4743
4744 return VINF_SUCCESS;
4745}
4746
4747
4748/**
4749 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4750 *
4751 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4752 * segment limit.
4753 *
4754 * @returns Strict VBox status code.
4755 * @param pIemCpu The per CPU data.
4756 * @param offNextInstr The offset of the next instruction.
4757 */
4758IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4759{
4760 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4761 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4762
4763 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4764 {
4765 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4766
4767 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4768 if (uNewEip > pCtx->cs.u32Limit)
4769 return iemRaiseGeneralProtectionFault0(pIemCpu);
4770 pCtx->rip = uNewEip;
4771 }
4772 else
4773 {
4774 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4775
4776 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4777 if (!IEM_IS_CANONICAL(uNewRip))
4778 return iemRaiseGeneralProtectionFault0(pIemCpu);
4779 pCtx->rip = uNewRip;
4780 }
4781 pCtx->eflags.Bits.u1RF = 0;
4782 return VINF_SUCCESS;
4783}
4784
4785
4786/**
4787 * Performs a near jump to the specified address.
4788 *
4789 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4790 * segment limit.
4791 *
 * @returns Strict VBox status code.
4792 * @param pIemCpu The per CPU data.
4793 * @param uNewRip The new RIP value.
4794 */
4795IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4796{
4797 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4798 switch (pIemCpu->enmEffOpSize)
4799 {
4800 case IEMMODE_16BIT:
4801 {
4802 Assert(uNewRip <= UINT16_MAX);
4803 if ( uNewRip > pCtx->cs.u32Limit
4804 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4805 return iemRaiseGeneralProtectionFault0(pIemCpu);
4806 /** @todo Test 16-bit jump in 64-bit mode. */
4807 pCtx->rip = uNewRip;
4808 break;
4809 }
4810
4811 case IEMMODE_32BIT:
4812 {
4813 Assert(uNewRip <= UINT32_MAX);
4814 Assert(pCtx->rip <= UINT32_MAX);
4815 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4816
4817 if (uNewRip > pCtx->cs.u32Limit)
4818 return iemRaiseGeneralProtectionFault0(pIemCpu);
4819 pCtx->rip = uNewRip;
4820 break;
4821 }
4822
4823 case IEMMODE_64BIT:
4824 {
4825 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4826
4827 if (!IEM_IS_CANONICAL(uNewRip))
4828 return iemRaiseGeneralProtectionFault0(pIemCpu);
4829 pCtx->rip = uNewRip;
4830 break;
4831 }
4832
4833 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4834 }
4835
4836 pCtx->eflags.Bits.u1RF = 0;
4837 return VINF_SUCCESS;
4838}
4839
4840
4841/**
4842 * Gets the address of the top of the stack.
4843 *
 * @returns The current top of stack address (RSP, ESP or SP, depending on mode).
4844 * @param pIemCpu The per CPU data.
4845 * @param pCtx The CPU context which SP/ESP/RSP should be
4846 * read.
4847 */
4848DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4849{
4850 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4851 return pCtx->rsp;
4852 if (pCtx->ss.Attr.n.u1DefBig)
4853 return pCtx->esp;
4854 return pCtx->sp;
4855}
4856
4857
4858/**
4859 * Updates the RIP/EIP/IP to point to the next instruction.
4860 *
4861 * This function leaves the EFLAGS.RF flag alone.
4862 *
4863 * @param pIemCpu The per CPU data.
4864 * @param cbInstr The number of bytes to add.
4865 */
4866IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4867{
4868 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4869 switch (pIemCpu->enmCpuMode)
4870 {
4871 case IEMMODE_16BIT:
4872 Assert(pCtx->rip <= UINT16_MAX);
4873 pCtx->eip += cbInstr;
4874 pCtx->eip &= UINT32_C(0xffff);
4875 break;
4876
4877 case IEMMODE_32BIT:
4878 pCtx->eip += cbInstr;
4879 Assert(pCtx->rip <= UINT32_MAX);
4880 break;
4881
4882 case IEMMODE_64BIT:
4883 pCtx->rip += cbInstr;
4884 break;
4885 default: AssertFailed();
4886 }
4887}
4888
4889
4890#if 0
4891/**
4892 * Updates the RIP/EIP/IP to point to the next instruction.
4893 *
4894 * @param pIemCpu The per CPU data.
4895 */
4896IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4897{
4898 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4899}
4900#endif
4901
4902
4903
4904/**
4905 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4906 *
4907 * @param pIemCpu The per CPU data.
4908 * @param cbInstr The number of bytes to add.
4909 */
4910IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4911{
4912 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4913
4914 pCtx->eflags.Bits.u1RF = 0;
4915
4916 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4917 switch (pIemCpu->enmCpuMode)
4918 {
4919 /** @todo investigate if EIP or RIP is really incremented. */
4920 case IEMMODE_16BIT:
4921 case IEMMODE_32BIT:
4922 pCtx->eip += cbInstr;
4923 Assert(pCtx->rip <= UINT32_MAX);
4924 break;
4925
4926 case IEMMODE_64BIT:
4927 pCtx->rip += cbInstr;
4928 break;
4929 default: AssertFailed();
4930 }
4931}
4932
4933
4934/**
4935 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4936 *
4937 * @param pIemCpu The per CPU data.
4938 */
4939IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4940{
4941 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4942}
4943
4944
4945/**
4946 * Adds to the stack pointer.
4947 *
4948 * @param pIemCpu The per CPU data.
4949 * @param pCtx The CPU context which SP/ESP/RSP should be
4950 * updated.
4951 * @param cbToAdd The number of bytes to add.
4952 */
4953DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4954{
4955 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4956 pCtx->rsp += cbToAdd;
4957 else if (pCtx->ss.Attr.n.u1DefBig)
4958 pCtx->esp += cbToAdd;
4959 else
4960 pCtx->sp += cbToAdd;
4961}
4962
4963
4964/**
4965 * Subtracts from the stack pointer.
4966 *
4967 * @param pIemCpu The per CPU data.
4968 * @param pCtx The CPU context which SP/ESP/RSP should be
4969 * updated.
4970 * @param cbToSub The number of bytes to subtract.
4971 */
4972DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4973{
4974 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4975 pCtx->rsp -= cbToSub;
4976 else if (pCtx->ss.Attr.n.u1DefBig)
4977 pCtx->esp -= cbToSub;
4978 else
4979 pCtx->sp -= cbToSub;
4980}
4981
4982
4983/**
4984 * Adds to the temporary stack pointer.
4985 *
4986 * @param pIemCpu The per CPU data.
4987 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4988 * @param cbToAdd The number of bytes to add.
4989 * @param pCtx Where to get the current stack mode.
4990 */
4991DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4992{
4993 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4994 pTmpRsp->u += cbToAdd;
4995 else if (pCtx->ss.Attr.n.u1DefBig)
4996 pTmpRsp->DWords.dw0 += cbToAdd;
4997 else
4998 pTmpRsp->Words.w0 += cbToAdd;
4999}
5000
5001
5002/**
5003 * Subtracts from the temporary stack pointer.
5004 *
5005 * @param pIemCpu The per CPU data.
5006 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5007 * @param cbToSub The number of bytes to subtract.
5008 * @param pCtx Where to get the current stack mode.
5009 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5010 * expecting that.
5011 */
5012DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5013{
5014 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5015 pTmpRsp->u -= cbToSub;
5016 else if (pCtx->ss.Attr.n.u1DefBig)
5017 pTmpRsp->DWords.dw0 -= cbToSub;
5018 else
5019 pTmpRsp->Words.w0 -= cbToSub;
5020}
5021
5022
5023/**
5024 * Calculates the effective stack address for a push of the specified size as
5025 * well as the new RSP value (upper bits may be masked).
5026 *
5027 * @returns Effective stack address for the push.
5028 * @param pIemCpu The IEM per CPU data.
5029 * @param pCtx Where to get the current stack mode.
5030 * @param cbItem The size of the stack item to push.
5031 * @param puNewRsp Where to return the new RSP value.
5032 */
5033DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5034{
5035 RTUINT64U uTmpRsp;
5036 RTGCPTR GCPtrTop;
5037 uTmpRsp.u = pCtx->rsp;
5038
5039 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5040 GCPtrTop = uTmpRsp.u -= cbItem;
5041 else if (pCtx->ss.Attr.n.u1DefBig)
5042 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5043 else
5044 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5045 *puNewRsp = uTmpRsp.u;
5046 return GCPtrTop;
5047}
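
/*
 * Illustrative sketch (hypothetical values, not built): how the 16-bit case
 * of iemRegGetRspForPush wraps SP while preserving the upper RSP bits.
 */
#if 0
static void iemExampleRspForPush(PIEMCPU pIemCpu, PCPUMCTX pCtx)
{
    /* Assume a 16-bit stack (SS.D=0, not 64-bit mode) and pCtx->rsp == 0x00010002. */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4 /*cbItem*/, &uNewRsp);
    /* GCPtrTop == 0xFFFE (SP wrapped around), uNewRsp == 0x0001FFFE (upper bits untouched). */
    NOREF(GCPtrTop); NOREF(uNewRsp);
}
#endif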
5048
5049
5050/**
5051 * Gets the current stack pointer and calculates the value after a pop of the
5052 * specified size.
5053 *
5054 * @returns Current stack pointer.
5055 * @param pIemCpu The per CPU data.
5056 * @param pCtx Where to get the current stack mode.
5057 * @param cbItem The size of the stack item to pop.
5058 * @param puNewRsp Where to return the new RSP value.
5059 */
5060DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5061{
5062 RTUINT64U uTmpRsp;
5063 RTGCPTR GCPtrTop;
5064 uTmpRsp.u = pCtx->rsp;
5065
5066 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5067 {
5068 GCPtrTop = uTmpRsp.u;
5069 uTmpRsp.u += cbItem;
5070 }
5071 else if (pCtx->ss.Attr.n.u1DefBig)
5072 {
5073 GCPtrTop = uTmpRsp.DWords.dw0;
5074 uTmpRsp.DWords.dw0 += cbItem;
5075 }
5076 else
5077 {
5078 GCPtrTop = uTmpRsp.Words.w0;
5079 uTmpRsp.Words.w0 += cbItem;
5080 }
5081 *puNewRsp = uTmpRsp.u;
5082 return GCPtrTop;
5083}
5084
5085
5086/**
5087 * Calculates the effective stack address for a push of the specified size as
5088 * well as the new temporary RSP value (upper bits may be masked).
5089 *
5090 * @returns Effective stack address for the push.
5091 * @param pIemCpu The per CPU data.
5092 * @param pCtx Where to get the current stack mode.
5093 * @param pTmpRsp The temporary stack pointer. This is updated.
5094 * @param cbItem The size of the stack item to push.
5095 */
5096DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5097{
5098 RTGCPTR GCPtrTop;
5099
5100 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5101 GCPtrTop = pTmpRsp->u -= cbItem;
5102 else if (pCtx->ss.Attr.n.u1DefBig)
5103 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5104 else
5105 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5106 return GCPtrTop;
5107}
5108
5109
5110/**
5111 * Gets the effective stack address for a pop of the specified size and
5112 * calculates and updates the temporary RSP.
5113 *
5114 * @returns Current stack pointer.
5115 * @param pIemCpu The per CPU data.
5116 * @param pCtx Where to get the current stack mode.
5117 * @param pTmpRsp The temporary stack pointer. This is updated.
5118 * @param cbItem The size of the stack item to pop.
5119 */
5120DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5121{
5122 RTGCPTR GCPtrTop;
5123 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5124 {
5125 GCPtrTop = pTmpRsp->u;
5126 pTmpRsp->u += cbItem;
5127 }
5128 else if (pCtx->ss.Attr.n.u1DefBig)
5129 {
5130 GCPtrTop = pTmpRsp->DWords.dw0;
5131 pTmpRsp->DWords.dw0 += cbItem;
5132 }
5133 else
5134 {
5135 GCPtrTop = pTmpRsp->Words.w0;
5136 pTmpRsp->Words.w0 += cbItem;
5137 }
5138 return GCPtrTop;
5139}
5140
5141/** @} */
5142
5143
5144/** @name FPU access and helpers.
5145 *
5146 * @{
5147 */
5148
5149
5150/**
5151 * Hook for preparing to use the host FPU.
5152 *
5153 * This is necessary in ring-0 and raw-mode context.
5154 *
5155 * @param pIemCpu The IEM per CPU data.
5156 */
5157DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5158{
5159#ifdef IN_RING3
5160 NOREF(pIemCpu);
5161#else
5162/** @todo RZ: FIXME */
5163//# error "Implement me"
5164#endif
5165}
5166
5167
5168/**
5169 * Hook for preparing to use the host FPU for SSE.
5170 *
5171 * This is necessary in ring-0 and raw-mode context.
5172 *
5173 * @param pIemCpu The IEM per CPU data.
5174 */
5175DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5176{
5177 iemFpuPrepareUsage(pIemCpu);
5178}
5179
5180
5181/**
5182 * Stores a QNaN value into an FPU register.
5183 *
5184 * @param pReg Pointer to the register.
5185 */
5186DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5187{
5188 pReg->au32[0] = UINT32_C(0x00000000);
5189 pReg->au32[1] = UINT32_C(0xc0000000);
5190 pReg->au16[4] = UINT16_C(0xffff);
5191}
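
/*
 * The value stored above is the x87 "real indefinite" QNaN in the 80-bit
 * register format: sign=1, exponent=0x7fff, mantissa=0xc000000000000000
 * (i.e. ffff c0000000 00000000 as a whole), which is what the FPU itself
 * produces for masked invalid-operation responses.
 */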
5192
5193
5194/**
5195 * Updates the FOP, FPU.CS and FPUIP registers.
5196 *
5197 * @param pIemCpu The IEM per CPU data.
5198 * @param pCtx The CPU context.
5199 * @param pFpuCtx The FPU context.
5200 */
5201DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5202{
5203 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5204 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5205 /** @todo x87.CS and FPUIP need to be kept separately. */
5206 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5207 {
5208 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5209 * happens in real mode here based on the fnsave and fnstenv images. */
5210 pFpuCtx->CS = 0;
5211 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5212 }
5213 else
5214 {
5215 pFpuCtx->CS = pCtx->cs.Sel;
5216 pFpuCtx->FPUIP = pCtx->rip;
5217 }
5218}
5219
5220
5221/**
5222 * Updates the x87.DS and FPUDP registers.
5223 *
5224 * @param pIemCpu The IEM per CPU data.
5225 * @param pCtx The CPU context.
5226 * @param pFpuCtx The FPU context.
5227 * @param iEffSeg The effective segment register.
5228 * @param GCPtrEff The effective address relative to @a iEffSeg.
5229 */
5230DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5231{
5232 RTSEL sel;
5233 switch (iEffSeg)
5234 {
5235 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5236 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5237 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5238 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5239 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5240 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5241 default:
5242 AssertMsgFailed(("%d\n", iEffSeg));
5243 sel = pCtx->ds.Sel;
5244 }
5245 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5246 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5247 {
5248 pFpuCtx->DS = 0;
5249 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5250 }
5251 else
5252 {
5253 pFpuCtx->DS = sel;
5254 pFpuCtx->FPUDP = GCPtrEff;
5255 }
5256}
5257
5258
5259/**
5260 * Rotates the stack registers in the push direction.
5261 *
5262 * @param pFpuCtx The FPU context.
5263 * @remarks This is a complete waste of time, but fxsave stores the registers in
5264 * stack order.
5265 */
5266DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5267{
5268 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5269 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5270 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5271 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5272 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5273 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5274 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5275 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5276 pFpuCtx->aRegs[0].r80 = r80Tmp;
5277}
5278
5279
5280/**
5281 * Rotates the stack registers in the pop direction.
5282 *
5283 * @param pFpuCtx The FPU context.
5284 * @remarks This is a complete waste of time, but fxsave stores the registers in
5285 * stack order.
5286 */
5287DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5288{
5289 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5290 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5291 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5292 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5293 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5294 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5295 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5296 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5297 pFpuCtx->aRegs[7].r80 = r80Tmp;
5298}
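
/*
 * To illustrate the two rotations above: fxsave keeps aRegs[] in ST-relative
 * order, so aRegs[0] is always ST(0). A push therefore moves every register
 * one slot up (the old ST(0) becomes ST(1), and the value that was in
 * aRegs[7] ends up in aRegs[0] where the new ST(0) will be written), while a
 * pop does the inverse. TOP itself is adjusted separately by the callers.
 */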
5299
5300
5301/**
5302 * Updates FSW and pushes an FPU result onto the FPU stack if no pending
5303 * exception prevents it.
5304 *
5305 * @param pIemCpu The IEM per CPU data.
5306 * @param pResult The FPU operation result to push.
5307 * @param pFpuCtx The FPU context.
5308 */
5309IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5310{
5311 /* Update FSW and bail if there are pending exceptions afterwards. */
5312 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5313 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5314 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5315 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5316 {
5317 pFpuCtx->FSW = fFsw;
5318 return;
5319 }
5320
5321 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5322 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5323 {
5324 /* All is fine, push the actual value. */
5325 pFpuCtx->FTW |= RT_BIT(iNewTop);
5326 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5327 }
5328 else if (pFpuCtx->FCW & X86_FCW_IM)
5329 {
5330 /* Masked stack overflow, push QNaN. */
5331 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5332 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5333 }
5334 else
5335 {
5336 /* Raise stack overflow, don't push anything. */
5337 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5338 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5339 return;
5340 }
5341
5342 fFsw &= ~X86_FSW_TOP_MASK;
5343 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5344 pFpuCtx->FSW = fFsw;
5345
5346 iemFpuRotateStackPush(pFpuCtx);
5347}
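
/*
 * Note on the TOP arithmetic above: TOP is a 3-bit field, so adding 7 modulo
 * 8 is the same as subtracting 1. E.g. with TOP=0 a push yields iNewTop=7;
 * the result is stored in aRegs[7] and becomes ST(0) once
 * iemFpuRotateStackPush has run. The FTW test on iNewTop is what turns a
 * push onto an occupied slot into a C1=1 stack overflow (#IE) instead of a
 * store.
 */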
5348
5349
5350/**
5351 * Stores a result in an FPU register and updates the FSW and FTW.
5352 *
5353 * @param pFpuCtx The FPU context.
5354 * @param pResult The result to store.
5355 * @param iStReg Which FPU register to store it in.
5356 */
5357IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5358{
5359 Assert(iStReg < 8);
5360 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5361 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5362 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5363 pFpuCtx->FTW |= RT_BIT(iReg);
5364 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5365}
5366
5367
5368/**
5369 * Only updates the FPU status word (FSW) with the result of the current
5370 * instruction.
5371 *
5372 * @param pFpuCtx The FPU context.
5373 * @param u16FSW The FSW output of the current instruction.
5374 */
5375IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5376{
5377 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5378 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5379}
5380
5381
5382/**
5383 * Pops one item off the FPU stack if no pending exception prevents it.
5384 *
5385 * @param pFpuCtx The FPU context.
5386 */
5387IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5388{
5389 /* Check pending exceptions. */
5390 uint16_t uFSW = pFpuCtx->FSW;
5391 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5392 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5393 return;
5394
5395 /* TOP--. */
5396 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5397 uFSW &= ~X86_FSW_TOP_MASK;
5398 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5399 pFpuCtx->FSW = uFSW;
5400
5401 /* Mark the previous ST0 as empty. */
5402 iOldTop >>= X86_FSW_TOP_SHIFT;
5403 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5404
5405 /* Rotate the registers. */
5406 iemFpuRotateStackPop(pFpuCtx);
5407}
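
/*
 * In the TOP update above, adding 9 within the 3-bit TOP field is equivalent
 * to adding 1 modulo 8, e.g. TOP=7 wraps to 0. The previous ST(0) slot is
 * then marked empty in FTW and the register array is rotated so that
 * aRegs[0] again corresponds to the new ST(0).
 */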
5408
5409
5410/**
5411 * Pushes an FPU result onto the FPU stack if no pending exception prevents it.
5412 *
5413 * @param pIemCpu The IEM per CPU data.
5414 * @param pResult The FPU operation result to push.
5415 */
5416IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5417{
5418 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5419 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5420 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5421 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5422}
5423
5424
5425/**
5426 * Pushes an FPU result onto the FPU stack if no pending exception prevents it,
5427 * and sets FPUDP and FPUDS.
5428 *
5429 * @param pIemCpu The IEM per CPU data.
5430 * @param pResult The FPU operation result to push.
5431 * @param iEffSeg The effective segment register.
5432 * @param GCPtrEff The effective address relative to @a iEffSeg.
5433 */
5434IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5435{
5436 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5437 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5438 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5439 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5440 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5441}
5442
5443
5444/**
5445 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5446 * unless a pending exception prevents it.
5447 *
5448 * @param pIemCpu The IEM per CPU data.
5449 * @param pResult The FPU operation result to store and push.
5450 */
5451IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5452{
5453 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5454 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5455 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5456
5457 /* Update FSW and bail if there are pending exceptions afterwards. */
5458 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5459 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5460 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5461 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5462 {
5463 pFpuCtx->FSW = fFsw;
5464 return;
5465 }
5466
5467 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5468 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5469 {
5470 /* All is fine, push the actual value. */
5471 pFpuCtx->FTW |= RT_BIT(iNewTop);
5472 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5473 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5474 }
5475 else if (pFpuCtx->FCW & X86_FCW_IM)
5476 {
5477 /* Masked stack overflow, push QNaN. */
5478 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5479 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5480 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5481 }
5482 else
5483 {
5484 /* Raise stack overflow, don't push anything. */
5485 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5486 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5487 return;
5488 }
5489
5490 fFsw &= ~X86_FSW_TOP_MASK;
5491 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5492 pFpuCtx->FSW = fFsw;
5493
5494 iemFpuRotateStackPush(pFpuCtx);
5495}
5496
5497
5498/**
5499 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5500 * FOP.
5501 *
5502 * @param pIemCpu The IEM per CPU data.
5503 * @param pResult The result to store.
5504 * @param iStReg Which FPU register to store it in.
5505 */
5506IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5507{
5508 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5509 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5510 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5511 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5512}
5513
5514
5515/**
5516 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5517 * FOP, and then pops the stack.
5518 *
5519 * @param pIemCpu The IEM per CPU data.
5520 * @param pResult The result to store.
5521 * @param iStReg Which FPU register to store it in.
5522 */
5523IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5524{
5525 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5526 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5527 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5528 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5529 iemFpuMaybePopOne(pFpuCtx);
5530}
5531
5532
5533/**
5534 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5535 * FPUDP, and FPUDS.
5536 *
5537 * @param pIemCpu The IEM per CPU data.
5538 * @param pResult The result to store.
5539 * @param iStReg Which FPU register to store it in.
5540 * @param iEffSeg The effective memory operand selector register.
5541 * @param GCPtrEff The effective memory operand offset.
5542 */
5543IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5544 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5545{
5546 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5547 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5548 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5549 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5550 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5551}
5552
5553
5554/**
5555 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5556 * FPUDP, and FPUDS, and then pops the stack.
5557 *
5558 * @param pIemCpu The IEM per CPU data.
5559 * @param pResult The result to store.
5560 * @param iStReg Which FPU register to store it in.
5561 * @param iEffSeg The effective memory operand selector register.
5562 * @param GCPtrEff The effective memory operand offset.
5563 */
5564IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5565 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5566{
5567 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5568 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5569 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5570 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5571 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5572 iemFpuMaybePopOne(pFpuCtx);
5573}
5574
5575
5576/**
5577 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5578 *
5579 * @param pIemCpu The IEM per CPU data.
5580 */
5581IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5582{
5583 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5584 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5585 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5586}
5587
5588
5589/**
5590 * Marks the specified stack register as free (for FFREE).
5591 *
5592 * @param pIemCpu The IEM per CPU data.
5593 * @param iStReg The register to free.
5594 */
5595IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5596{
5597 Assert(iStReg < 8);
5598 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5599 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5600 pFpuCtx->FTW &= ~RT_BIT(iReg);
5601}
5602
5603
5604/**
5605 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5606 *
5607 * @param pIemCpu The IEM per CPU data.
5608 */
5609IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5610{
5611 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5612 uint16_t uFsw = pFpuCtx->FSW;
5613 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5614 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5615 uFsw &= ~X86_FSW_TOP_MASK;
5616 uFsw |= uTop;
5617 pFpuCtx->FSW = uFsw;
5618}
5619
5620
5621/**
5622 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5623 *
5624 * @param pIemCpu The IEM per CPU data.
5625 */
5626IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5627{
5628 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5629 uint16_t uFsw = pFpuCtx->FSW;
5630 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5631 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5632 uFsw &= ~X86_FSW_TOP_MASK;
5633 uFsw |= uTop;
5634 pFpuCtx->FSW = uFsw;
5635}
5636
5637
5638/**
5639 * Updates the FSW, FOP, FPUIP, and FPUCS.
5640 *
5641 * @param pIemCpu The IEM per CPU data.
5642 * @param u16FSW The FSW from the current instruction.
5643 */
5644IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5645{
5646 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5647 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5648 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5649 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5650}
5651
5652
5653/**
5654 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5655 *
5656 * @param pIemCpu The IEM per CPU data.
5657 * @param u16FSW The FSW from the current instruction.
5658 */
5659IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5660{
5661 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5662 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5663 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5664 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5665 iemFpuMaybePopOne(pFpuCtx);
5666}
5667
5668
5669/**
5670 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5671 *
5672 * @param pIemCpu The IEM per CPU data.
5673 * @param u16FSW The FSW from the current instruction.
5674 * @param iEffSeg The effective memory operand selector register.
5675 * @param GCPtrEff The effective memory operand offset.
5676 */
5677IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5678{
5679 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5680 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5681 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5682 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5683 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5684}
5685
5686
5687/**
5688 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5689 *
5690 * @param pIemCpu The IEM per CPU data.
5691 * @param u16FSW The FSW from the current instruction.
5692 */
5693IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5694{
5695 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5696 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5697 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5698 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5699 iemFpuMaybePopOne(pFpuCtx);
5700 iemFpuMaybePopOne(pFpuCtx);
5701}
5702
5703
5704/**
5705 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5706 *
5707 * @param pIemCpu The IEM per CPU data.
5708 * @param u16FSW The FSW from the current instruction.
5709 * @param iEffSeg The effective memory operand selector register.
5710 * @param GCPtrEff The effective memory operand offset.
5711 */
5712IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5713{
5714 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5715 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5716 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5717 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5718 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5719 iemFpuMaybePopOne(pFpuCtx);
5720}
5721
5722
5723/**
5724 * Worker routine for raising an FPU stack underflow exception.
5725 *
5726 * @param pIemCpu The IEM per CPU data.
5727 * @param pFpuCtx The FPU context.
5728 * @param iStReg The stack register being accessed.
5729 */
5730IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5731{
5732 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5733 if (pFpuCtx->FCW & X86_FCW_IM)
5734 {
5735 /* Masked underflow. */
5736 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5737 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5738 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5739 if (iStReg != UINT8_MAX)
5740 {
5741 pFpuCtx->FTW |= RT_BIT(iReg);
5742 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5743 }
5744 }
5745 else
5746 {
5747 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5748 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5749 }
5750}
5751
5752
5753/**
5754 * Raises an FPU stack underflow exception.
5755 *
5756 * @param pIemCpu The IEM per CPU data.
5757 * @param iStReg The destination register that should be loaded
5758 * with QNaN if \#IS is not masked. Specify
5759 * UINT8_MAX if none (like for fcom).
5760 */
5761DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5762{
5763 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5764 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5765 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5766 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5767}
5768
5769
5770DECL_NO_INLINE(IEM_STATIC, void)
5771iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5772{
5773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5774 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5775 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5776 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5777 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5778}
5779
5780
5781DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5782{
5783 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5784 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5785 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5786 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5787 iemFpuMaybePopOne(pFpuCtx);
5788}
5789
5790
5791DECL_NO_INLINE(IEM_STATIC, void)
5792iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5793{
5794 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5795 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5796 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5797 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5798 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5799 iemFpuMaybePopOne(pFpuCtx);
5800}
5801
5802
5803DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5804{
5805 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5806 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5807 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5808 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5809 iemFpuMaybePopOne(pFpuCtx);
5810 iemFpuMaybePopOne(pFpuCtx);
5811}
5812
5813
5814DECL_NO_INLINE(IEM_STATIC, void)
5815iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5816{
5817 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5818 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5819 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5820
5821 if (pFpuCtx->FCW & X86_FCW_IM)
5822 {
5823 /* Masked underflow - Push QNaN. */
5824 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5825 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5826 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5827 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5828 pFpuCtx->FTW |= RT_BIT(iNewTop);
5829 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5830 iemFpuRotateStackPush(pFpuCtx);
5831 }
5832 else
5833 {
5834 /* Exception pending - don't change TOP or the register stack. */
5835 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5836 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5837 }
5838}
5839
5840
5841DECL_NO_INLINE(IEM_STATIC, void)
5842iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5843{
5844 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5845 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5846 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5847
5848 if (pFpuCtx->FCW & X86_FCW_IM)
5849 {
5850 /* Masked underflow - Push QNaN. */
5851 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5852 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5853 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5854 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5855 pFpuCtx->FTW |= RT_BIT(iNewTop);
5856 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5857 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5858 iemFpuRotateStackPush(pFpuCtx);
5859 }
5860 else
5861 {
5862 /* Exception pending - don't change TOP or the register stack. */
5863 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5864 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5865 }
5866}
5867
5868
5869/**
5870 * Worker routine for raising an FPU stack overflow exception on a push.
5871 *
5872 * @param pFpuCtx The FPU context.
5873 */
5874IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5875{
5876 if (pFpuCtx->FCW & X86_FCW_IM)
5877 {
5878 /* Masked overflow. */
5879 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5880 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5881 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5882 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5883 pFpuCtx->FTW |= RT_BIT(iNewTop);
5884 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5885 iemFpuRotateStackPush(pFpuCtx);
5886 }
5887 else
5888 {
5889 /* Exception pending - don't change TOP or the register stack. */
5890 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5891 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5892 }
5893}
5894
5895
5896/**
5897 * Raises an FPU stack overflow exception on a push.
5898 *
5899 * @param pIemCpu The IEM per CPU data.
5900 */
5901DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5902{
5903 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5904 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5905 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5906 iemFpuStackPushOverflowOnly(pFpuCtx);
5907}
5908
5909
5910/**
5911 * Raises an FPU stack overflow exception on a push with a memory operand.
5912 *
5913 * @param pIemCpu The IEM per CPU data.
5914 * @param iEffSeg The effective memory operand selector register.
5915 * @param GCPtrEff The effective memory operand offset.
5916 */
5917DECL_NO_INLINE(IEM_STATIC, void)
5918iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5919{
5920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5921 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5922 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5923 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5924 iemFpuStackPushOverflowOnly(pFpuCtx);
5925}
5926
5927
5928IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5929{
5930 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5931 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5932 if (pFpuCtx->FTW & RT_BIT(iReg))
5933 return VINF_SUCCESS;
5934 return VERR_NOT_FOUND;
5935}
5936
5937
5938IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5939{
5940 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5941 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5942 if (pFpuCtx->FTW & RT_BIT(iReg))
5943 {
5944 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5945 return VINF_SUCCESS;
5946 }
5947 return VERR_NOT_FOUND;
5948}
5949
5950
5951IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5952 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5953{
5954 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5955 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5956 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5957 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5958 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5959 {
5960 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5961 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
5962 return VINF_SUCCESS;
5963 }
5964 return VERR_NOT_FOUND;
5965}
5966
5967
5968IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5969{
5970 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5971 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5972 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5973 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5974 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5975 {
5976 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5977 return VINF_SUCCESS;
5978 }
5979 return VERR_NOT_FOUND;
5980}
5981
5982
5983/**
5984 * Updates the FPU exception status after FCW is changed.
5985 *
5986 * @param pFpuCtx The FPU context.
5987 */
5988IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
5989{
5990 uint16_t u16Fsw = pFpuCtx->FSW;
5991 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
5992 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5993 else
5994 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5995 pFpuCtx->FSW = u16Fsw;
5996}
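
/*
 * Example (hypothetical values): if FSW has only PE pending and FCW masks
 * precision exceptions (X86_FCW_PM set), ES and B are cleared; unmasking PE
 * and recalculating would set ES and B again, which is what makes the next
 * waiting FPU instruction deliver #MF (assuming CR0.NE is set).
 */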
5997
5998
5999/**
6000 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6001 *
6002 * @returns The full FTW.
6003 * @param pFpuCtx The FPU context.
6004 */
6005IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6006{
6007 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6008 uint16_t u16Ftw = 0;
6009 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6010 for (unsigned iSt = 0; iSt < 8; iSt++)
6011 {
6012 unsigned const iReg = (iSt + iTop) & 7;
6013 if (!(u8Ftw & RT_BIT(iReg)))
6014 u16Ftw |= 3 << (iReg * 2); /* empty */
6015 else
6016 {
6017 uint16_t uTag;
6018 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6019 if (pr80Reg->s.uExponent == 0x7fff)
6020 uTag = 2; /* Exponent is all 1's => Special. */
6021 else if (pr80Reg->s.uExponent == 0x0000)
6022 {
6023 if (pr80Reg->s.u64Mantissa == 0x0000)
6024 uTag = 1; /* All bits are zero => Zero. */
6025 else
6026 uTag = 2; /* Must be special. */
6027 }
6028 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6029 uTag = 0; /* Valid. */
6030 else
6031 uTag = 2; /* Must be special. */
6032
6033 u16Ftw |= uTag << (iReg * 2);
6034 }
6035 }
6036
6037 return u16Ftw;
6038}
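
/*
 * Tag encoding produced above: 00=valid, 01=zero, 10=special (NaN, infinity,
 * denormal, unnormal), 11=empty. Worked example with hypothetical contents:
 * TOP=6, ST(0)=1.0 in register 6 (tag 00), ST(1)=+0.0 in register 7 (tag 01)
 * and registers 0-5 empty (tag 11) gives a full FTW of 0x4fff.
 */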
6039
6040
6041/**
6042 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6043 *
6044 * @returns The compressed FTW.
6045 * @param u16FullFtw The full FTW to convert.
6046 */
6047IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6048{
6049 uint8_t u8Ftw = 0;
6050 for (unsigned i = 0; i < 8; i++)
6051 {
6052 if ((u16FullFtw & 3) != 3 /*empty*/)
6053 u8Ftw |= RT_BIT(i);
6054 u16FullFtw >>= 2;
6055 }
6056
6057 return u8Ftw;
6058}
6059
6060/** @} */
6061
6062
6063/** @name Memory access.
6064 *
6065 * @{
6066 */
6067
6068
6069/**
6070 * Updates the IEMCPU::cbWritten counter if applicable.
6071 *
6072 * @param pIemCpu The IEM per CPU data.
6073 * @param fAccess The access being accounted for.
6074 * @param cbMem The access size.
6075 */
6076DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6077{
6078 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6079 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6080 pIemCpu->cbWritten += (uint32_t)cbMem;
6081}
6082
6083
6084/**
6085 * Checks if the given segment can be written to, raising the appropriate
6086 * exception if not.
6087 *
6088 * @returns VBox strict status code.
6089 *
6090 * @param pIemCpu The IEM per CPU data.
6091 * @param pHid Pointer to the hidden register.
6092 * @param iSegReg The register number.
6093 * @param pu64BaseAddr Where to return the base address to use for the
6094 * segment. (In 64-bit code it may differ from the
6095 * base in the hidden segment.)
6096 */
6097IEM_STATIC VBOXSTRICTRC
6098iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6099{
6100 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6101 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6102 else
6103 {
6104 if (!pHid->Attr.n.u1Present)
6105 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6106
6107 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6108 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6109 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6110 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6111 *pu64BaseAddr = pHid->u64Base;
6112 }
6113 return VINF_SUCCESS;
6114}
6115
6116
6117/**
6118 * Checks if the given segment can be read from, raising the appropriate
6119 * exception if not.
6120 *
6121 * @returns VBox strict status code.
6122 *
6123 * @param pIemCpu The IEM per CPU data.
6124 * @param pHid Pointer to the hidden register.
6125 * @param iSegReg The register number.
6126 * @param pu64BaseAddr Where to return the base address to use for the
6127 * segment. (In 64-bit code it may differ from the
6128 * base in the hidden segment.)
6129 */
6130IEM_STATIC VBOXSTRICTRC
6131iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6132{
6133 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6134 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6135 else
6136 {
6137 if (!pHid->Attr.n.u1Present)
6138 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6139
6140 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6141 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6142 *pu64BaseAddr = pHid->u64Base;
6143 }
6144 return VINF_SUCCESS;
6145}
6146
6147
6148/**
6149 * Applies the segment limit, base and attributes.
6150 *
6151 * This may raise a \#GP or \#SS.
6152 *
6153 * @returns VBox strict status code.
6154 *
6155 * @param pIemCpu The IEM per CPU data.
6156 * @param fAccess The kind of access which is being performed.
6157 * @param iSegReg The index of the segment register to apply.
6158 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6159 * TSS, ++).
6160 * @param cbMem The access size.
6161 * @param pGCPtrMem Pointer to the guest memory address to apply
6162 * segmentation to. Input and output parameter.
6163 */
6164IEM_STATIC VBOXSTRICTRC
6165iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6166{
6167 if (iSegReg == UINT8_MAX)
6168 return VINF_SUCCESS;
6169
6170 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6171 switch (pIemCpu->enmCpuMode)
6172 {
6173 case IEMMODE_16BIT:
6174 case IEMMODE_32BIT:
6175 {
6176 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6177 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6178
6179 Assert(pSel->Attr.n.u1Present);
6180 Assert(pSel->Attr.n.u1DescType);
6181 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6182 {
6183 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6184 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6185 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6186
6187 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6188 {
6189 /** @todo CPL check. */
6190 }
6191
6192 /*
6193 * There are two kinds of data selectors, normal and expand down.
6194 */
6195 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6196 {
6197 if ( GCPtrFirst32 > pSel->u32Limit
6198 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6199 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6200 }
6201 else
6202 {
6203 /*
6204 * The upper boundary is defined by the B bit, not the G bit!
6205 */
6206 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6207 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6208 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6209 }
6210 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6211 }
6212 else
6213 {
6214
6215 /*
6216 * A code selector can usually be used to read through; writing is
6217 * only permitted in real and V8086 mode.
6218 */
6219 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6220 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6221 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6222 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6223 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6224
6225 if ( GCPtrFirst32 > pSel->u32Limit
6226 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6227 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6228
6229 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6230 {
6231 /** @todo CPL check. */
6232 }
6233
6234 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6235 }
6236 return VINF_SUCCESS;
6237 }
6238
6239 case IEMMODE_64BIT:
6240 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6241 *pGCPtrMem += pSel->u64Base;
6242 return VINF_SUCCESS;
6243
6244 default:
6245 AssertFailedReturn(VERR_IEM_IPE_7);
6246 }
6247}
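
/*
 * Expand-down example for the check above (hypothetical descriptor): a data
 * segment with limit=0x0fff, B=1 and the DOWN type bit set accepts offsets
 * 0x1000..0xffffffff. An access with GCPtrFirst32=0x0800 fails the
 * "GCPtrFirst32 < limit + 1" test and goes to iemRaiseSelectorBounds, while
 * one at 0x2000 passes (provided GCPtrLast32 stays within the upper bound).
 */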
6248
6249
6250/**
6251 * Translates a virtual address to a physical address and checks if we
6252 * can access the page as specified.
6253 *
6254 * @param pIemCpu The IEM per CPU data.
6255 * @param GCPtrMem The virtual address.
6256 * @param fAccess The intended access.
6257 * @param pGCPhysMem Where to return the physical address.
6258 */
6259IEM_STATIC VBOXSTRICTRC
6260iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6261{
6262 /** @todo Need a different PGM interface here. We're currently using
6263 * generic / REM interfaces. This won't cut it for R0 & RC. */
6264 RTGCPHYS GCPhys;
6265 uint64_t fFlags;
6266 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6267 if (RT_FAILURE(rc))
6268 {
6269 /** @todo Check unassigned memory in unpaged mode. */
6270 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6271 *pGCPhysMem = NIL_RTGCPHYS;
6272 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6273 }
6274
6275 /* If the page is writable and does not have the no-exec bit set, all
6276 access is allowed. Otherwise we'll have to check more carefully... */
6277 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6278 {
6279 /* Write to read only memory? */
6280 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6281 && !(fFlags & X86_PTE_RW)
6282 && ( pIemCpu->uCpl != 0
6283 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6284 {
6285 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6286 *pGCPhysMem = NIL_RTGCPHYS;
6287 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6288 }
6289
6290 /* Kernel memory accessed by userland? */
6291 if ( !(fFlags & X86_PTE_US)
6292 && pIemCpu->uCpl == 3
6293 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6294 {
6295 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6296 *pGCPhysMem = NIL_RTGCPHYS;
6297 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6298 }
6299
6300 /* Executing non-executable memory? */
6301 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6302 && (fFlags & X86_PTE_PAE_NX)
6303 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6304 {
6305 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6306 *pGCPhysMem = NIL_RTGCPHYS;
6307 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6308 VERR_ACCESS_DENIED);
6309 }
6310 }
6311
6312 /*
6313 * Set the dirty / access flags.
6314 * ASSUMES this is set when the address is translated rather than on commit...
6315 */
6316 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6317 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6318 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6319 {
6320 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6321 AssertRC(rc2);
6322 }
6323
6324 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6325 *pGCPhysMem = GCPhys;
6326 return VINF_SUCCESS;
6327}
6328
6329
6330
6331/**
6332 * Maps a physical page.
6333 *
6334 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6335 * @param pIemCpu The IEM per CPU data.
6336 * @param GCPhysMem The physical address.
6337 * @param fAccess The intended access.
6338 * @param ppvMem Where to return the mapping address.
6339 * @param pLock The PGM lock.
6340 */
6341IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6342{
6343#ifdef IEM_VERIFICATION_MODE_FULL
6344 /* Force the alternative path so we can ignore writes. */
6345 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6346 {
6347 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6348 {
6349 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6350 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6351 if (RT_FAILURE(rc2))
6352 pIemCpu->fProblematicMemory = true;
6353 }
6354 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6355 }
6356#endif
6357#ifdef IEM_LOG_MEMORY_WRITES
6358 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6359 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6360#endif
6361#ifdef IEM_VERIFICATION_MODE_MINIMAL
6362 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6363#endif
6364
6365 /** @todo This API may require some improving later. A private deal with PGM
6366 * regarding locking and unlocking needs to be struck. A couple of TLBs
6367 * living in PGM, but with publicly accessible inlined access methods
6368 * could perhaps be an even better solution. */
6369 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6370 GCPhysMem,
6371 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6372 pIemCpu->fBypassHandlers,
6373 ppvMem,
6374 pLock);
6375 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6376 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6377
6378#ifdef IEM_VERIFICATION_MODE_FULL
6379 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6380 pIemCpu->fProblematicMemory = true;
6381#endif
6382 return rc;
6383}
6384
6385
6386/**
6387 * Unmaps a page previously mapped by iemMemPageMap.
6388 *
6389 * @param pIemCpu The IEM per CPU data.
6390 * @param GCPhysMem The physical address.
6391 * @param fAccess The intended access.
6392 * @param pvMem What iemMemPageMap returned.
6393 * @param pLock The PGM lock.
6394 */
6395DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6396{
6397 NOREF(pIemCpu);
6398 NOREF(GCPhysMem);
6399 NOREF(fAccess);
6400 NOREF(pvMem);
6401 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6402}
6403
6404
6405/**
6406 * Looks up a memory mapping entry.
6407 *
6408 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6409 * @param pIemCpu The IEM per CPU data.
6410 * @param pvMem The memory address.
6411 * @param fAccess The access type to match.
6412 */
6413DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6414{
6415 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6416 if ( pIemCpu->aMemMappings[0].pv == pvMem
6417 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6418 return 0;
6419 if ( pIemCpu->aMemMappings[1].pv == pvMem
6420 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6421 return 1;
6422 if ( pIemCpu->aMemMappings[2].pv == pvMem
6423 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6424 return 2;
6425 return VERR_NOT_FOUND;
6426}
6427
6428
6429/**
6430 * Finds a free memmap entry when using iNextMapping doesn't work.
6431 *
6432 * @returns Memory mapping index, 1024 on failure.
6433 * @param pIemCpu The IEM per CPU data.
6434 */
6435IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6436{
6437 /*
6438 * The easy case.
6439 */
6440 if (pIemCpu->cActiveMappings == 0)
6441 {
6442 pIemCpu->iNextMapping = 1;
6443 return 0;
6444 }
6445
6446 /* There should be enough mappings for all instructions. */
6447 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6448
6449 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6450 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6451 return i;
6452
6453 AssertFailedReturn(1024);
6454}
6455
6456
6457/**
6458 * Commits a bounce buffer that needs writing back and unmaps it.
6459 *
6460 * @returns Strict VBox status code.
6461 * @param pIemCpu The IEM per CPU data.
6462 * @param iMemMap The index of the buffer to commit.
6463 */
6464IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6465{
6466 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6467 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6468
6469 /*
6470 * Do the writing.
6471 */
6472#ifndef IEM_VERIFICATION_MODE_MINIMAL
6473 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6474 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6475 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6476 {
6477 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6478 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6479 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6480 if (!pIemCpu->fBypassHandlers)
6481 {
6482 /*
6483 * Carefully and efficiently dealing with access handler return
6484 * codes makes this a little bloated.
6485 */
6486 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6487 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6488 pbBuf,
6489 cbFirst,
6490 PGMACCESSORIGIN_IEM);
6491 if (rcStrict == VINF_SUCCESS)
6492 {
6493 if (cbSecond)
6494 {
6495 rcStrict = PGMPhysWrite(pVM,
6496 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6497 pbBuf + cbFirst,
6498 cbSecond,
6499 PGMACCESSORIGIN_IEM);
6500 if (rcStrict == VINF_SUCCESS)
6501 { /* nothing */ }
6502 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6503 {
6504 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6505 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6506 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6507 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6508 }
6509 else
6510 {
6511 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6512 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6513 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6514 return rcStrict;
6515 }
6516 }
6517 }
6518 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6519 {
6520 if (!cbSecond)
6521 {
6522 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6523 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6524 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6525 }
6526 else
6527 {
6528 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6529 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6530 pbBuf + cbFirst,
6531 cbSecond,
6532 PGMACCESSORIGIN_IEM);
6533 if (rcStrict2 == VINF_SUCCESS)
6534 {
6535 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6536 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6537 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6538 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6539 }
6540 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6541 {
6542 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6543 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6544 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6545 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6546 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6547 }
6548 else
6549 {
6550 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6551 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6552 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6553 return rcStrict2;
6554 }
6555 }
6556 }
6557 else
6558 {
6559 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6560 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6561 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6562 return rcStrict;
6563 }
6564 }
6565 else
6566 {
6567 /*
6568 * No access handlers, much simpler.
6569 */
6570 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6571 if (RT_SUCCESS(rc))
6572 {
6573 if (cbSecond)
6574 {
6575 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6576 if (RT_SUCCESS(rc))
6577 { /* likely */ }
6578 else
6579 {
6580 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6581 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6582 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6583 return rc;
6584 }
6585 }
6586 }
6587 else
6588 {
6589 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6590 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6591 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6592 return rc;
6593 }
6594 }
6595 }
6596#endif
6597
6598#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6599 /*
6600 * Record the write(s).
6601 */
6602 if (!pIemCpu->fNoRem)
6603 {
6604 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6605 if (pEvtRec)
6606 {
6607 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6608 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6609 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6610 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6611 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6612 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6613 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6614 }
6615 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6616 {
6617 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6618 if (pEvtRec)
6619 {
6620 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6621 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6622 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6623 memcpy(pEvtRec->u.RamWrite.ab,
6624 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6625 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6626 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6627 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6628 }
6629 }
6630 }
6631#endif
6632#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6633 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6634 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6635 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6636 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6637 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6638 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6639
6640 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6641 g_cbIemWrote = cbWrote;
6642 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6643#endif
6644
6645 /*
6646 * Free the mapping entry.
6647 */
6648 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6649 Assert(pIemCpu->cActiveMappings != 0);
6650 pIemCpu->cActiveMappings--;
6651 return VINF_SUCCESS;
6652}
6653
6654
6655/**
6656 * iemMemMap worker that deals with a request crossing pages.
6657 */
6658IEM_STATIC VBOXSTRICTRC
6659iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6660{
6661 /*
6662 * Do the address translations.
6663 */
6664 RTGCPHYS GCPhysFirst;
6665 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6666 if (rcStrict != VINF_SUCCESS)
6667 return rcStrict;
6668
6669/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6670 * last byte. */
6671 RTGCPHYS GCPhysSecond;
6672 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6673 if (rcStrict != VINF_SUCCESS)
6674 return rcStrict;
6675 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6676
6677 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6678#ifdef IEM_VERIFICATION_MODE_FULL
6679 /*
6680 * Detect problematic memory when verifying so we can select
6681 * the right execution engine. (TLB: Redo this.)
6682 */
6683 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6684 {
6685 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6686 if (RT_SUCCESS(rc2))
6687 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6688 if (RT_FAILURE(rc2))
6689 pIemCpu->fProblematicMemory = true;
6690 }
6691#endif
6692
6693
6694 /*
6695 * Read in the current memory content if it's a read, execute or partial
6696 * write access.
6697 */
6698 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6699 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6700 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6701
6702 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6703 {
6704 if (!pIemCpu->fBypassHandlers)
6705 {
6706 /*
6707 * Must carefully deal with access handler status codes here,
6708 * makes the code a bit bloated.
6709 */
6710 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6711 if (rcStrict == VINF_SUCCESS)
6712 {
6713 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6714 if (rcStrict == VINF_SUCCESS)
6715 { /*likely */ }
6716 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6717 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6718 else
6719 {
6720 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6721 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6722 return rcStrict;
6723 }
6724 }
6725 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6726 {
6727 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6728 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6729 {
6730 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6731 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6732 }
6733 else
6734 {
6735 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6736 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6737 return rcStrict2;
6738 }
6739 }
6740 else
6741 {
6742 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6743 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6744 return rcStrict;
6745 }
6746 }
6747 else
6748 {
6749 /*
6750 * No informational status codes here, much more straightforward.
6751 */
6752 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6753 if (RT_SUCCESS(rc))
6754 {
6755 Assert(rc == VINF_SUCCESS);
6756 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6757 if (RT_SUCCESS(rc))
6758 Assert(rc == VINF_SUCCESS);
6759 else
6760 {
6761 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6762 return rc;
6763 }
6764 }
6765 else
6766 {
6767 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6768 return rc;
6769 }
6770 }
6771
6772#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6773 if ( !pIemCpu->fNoRem
6774 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6775 {
6776 /*
6777 * Record the reads.
6778 */
6779 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6780 if (pEvtRec)
6781 {
6782 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6783 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6784 pEvtRec->u.RamRead.cb = cbFirstPage;
6785 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6786 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6787 }
6788 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6789 if (pEvtRec)
6790 {
6791 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6792 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6793 pEvtRec->u.RamRead.cb = cbSecondPage;
6794 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6795 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6796 }
6797 }
6798#endif
6799 }
6800#ifdef VBOX_STRICT
6801 else
6802 memset(pbBuf, 0xcc, cbMem);
6803 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6804 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6805#endif
6806
6807 /*
6808 * Commit the bounce buffer entry.
6809 */
6810 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6811 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6812 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6813 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6814 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6815 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6816 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6817 pIemCpu->iNextMapping = iMemMap + 1;
6818 pIemCpu->cActiveMappings++;
6819
6820 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6821 *ppvMem = pbBuf;
6822 return VINF_SUCCESS;
6823}
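
/*
 * Editor note (illustrative, not part of the original source): the split above
 * keeps cbFirstPage bytes on the first page and the remainder on the second,
 * e.g. an 8 byte access at page offset 0xffc gives cbFirstPage=4 and
 * cbSecondPage=4.  A hypothetical stand-alone version of the arithmetic:
 */
#if 0
static void iemExampleCrossPageSplit(RTGCPTR GCPtrMem, size_t cbMem)
{
    uint32_t const cbFirstPage  = PAGE_SIZE - (GCPtrMem & PAGE_OFFSET_MASK); /* bytes left on page 1 */
    uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);           /* spill-over onto page 2 */
    Assert(cbFirstPage + cbSecondPage == cbMem);
    NOREF(cbSecondPage);
}
#endif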
6824
6825
6826/**
6827 * iemMemMap worker that deals with iemMemPageMap failures.
6828 */
6829IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6830 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6831{
6832 /*
6833 * Filter out conditions we can handle and the ones which shouldn't happen.
6834 */
6835 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6836 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6837 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6838 {
6839 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6840 return rcMap;
6841 }
6842 pIemCpu->cPotentialExits++;
6843
6844 /*
6845 * Read in the current memory content if it's a read, execute or partial
6846 * write access.
6847 */
6848 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6849 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6850 {
6851 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6852 memset(pbBuf, 0xff, cbMem);
6853 else
6854 {
6855 int rc;
6856 if (!pIemCpu->fBypassHandlers)
6857 {
6858 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6859 if (rcStrict == VINF_SUCCESS)
6860 { /* nothing */ }
6861 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6862 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6863 else
6864 {
6865 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6866 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6867 return rcStrict;
6868 }
6869 }
6870 else
6871 {
6872 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6873 if (RT_SUCCESS(rc))
6874 { /* likely */ }
6875 else
6876 {
6877 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6878 GCPhysFirst, rc));
6879 return rc;
6880 }
6881 }
6882 }
6883
6884#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6885 if ( !pIemCpu->fNoRem
6886 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6887 {
6888 /*
6889 * Record the read.
6890 */
6891 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6892 if (pEvtRec)
6893 {
6894 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6895 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6896 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6897 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6898 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6899 }
6900 }
6901#endif
6902 }
6903#ifdef VBOX_STRICT
6904 else
6905 memset(pbBuf, 0xcc, cbMem);
6906#endif
6907#ifdef VBOX_STRICT
6908 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6909 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6910#endif
6911
6912 /*
6913 * Commit the bounce buffer entry.
6914 */
6915 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6916 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6917 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6918 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6919 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6920 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6921 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6922 pIemCpu->iNextMapping = iMemMap + 1;
6923 pIemCpu->cActiveMappings++;
6924
6925 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6926 *ppvMem = pbBuf;
6927 return VINF_SUCCESS;
6928}
6929
6930
6931
6932/**
6933 * Maps the specified guest memory for the given kind of access.
6934 *
6935 * This may be using bounce buffering of the memory if it's crossing a page
6936 * boundary or if there is an access handler installed for any of it. Because
6937 * of lock prefix guarantees, we're in for some extra clutter when this
6938 * happens.
6939 *
6940 * This may raise a \#GP, \#SS, \#PF or \#AC.
6941 *
6942 * @returns VBox strict status code.
6943 *
6944 * @param pIemCpu The IEM per CPU data.
6945 * @param ppvMem Where to return the pointer to the mapped
6946 * memory.
6947 * @param cbMem The number of bytes to map. This is usually 1,
6948 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6949 * string operations it can be up to a page.
6950 * @param iSegReg The index of the segment register to use for
6951 * this access. The base and limits are checked.
6952 * Use UINT8_MAX to indicate that no segmentation
6953 * is required (for IDT, GDT and LDT accesses).
6954 * @param GCPtrMem The address of the guest memory.
6955 * @param fAccess How the memory is being accessed. The
6956 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6957 * how to map the memory, while the
6958 * IEM_ACCESS_WHAT_XXX bit is used when raising
6959 * exceptions.
6960 */
6961IEM_STATIC VBOXSTRICTRC
6962iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6963{
6964 /*
6965 * Check the input and figure out which mapping entry to use.
6966 */
6967 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6968 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6969
6970 unsigned iMemMap = pIemCpu->iNextMapping;
6971 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6972 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6973 {
6974 iMemMap = iemMemMapFindFree(pIemCpu);
6975 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
6976 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
6977 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
6978 pIemCpu->aMemMappings[2].fAccess),
6979 VERR_IEM_IPE_9);
6980 }
6981
6982 /*
6983 * Map the memory, checking that we can actually access it. If something
6984 * slightly complicated happens, fall back on bounce buffering.
6985 */
6986 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6987 if (rcStrict != VINF_SUCCESS)
6988 return rcStrict;
6989
6990 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6991 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6992
6993 RTGCPHYS GCPhysFirst;
6994 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6995 if (rcStrict != VINF_SUCCESS)
6996 return rcStrict;
6997
6998 void *pvMem;
6999 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7000 if (rcStrict != VINF_SUCCESS)
7001 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7002
7003 /*
7004 * Fill in the mapping table entry.
7005 */
7006 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7007 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7008 pIemCpu->iNextMapping = iMemMap + 1;
7009 pIemCpu->cActiveMappings++;
7010
7011 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7012 *ppvMem = pvMem;
7013 return VINF_SUCCESS;
7014}
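
/*
 * Illustrative usage sketch (editor addition, not part of the original source):
 * the map / commit pattern for a read-modify-write operand, assuming the
 * IEM_ACCESS_DATA_RW combination defined in IEMInternal.h.  The helper itself
 * is hypothetical; the read-only fetch helpers below show the same pattern for
 * plain reads.
 */
#if 0
static VBOXSTRICTRC iemExampleIncWordAt(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint16_t *pu16;
    /* Map the guest word for reading and writing; may end up bounce buffered. */
    VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu16, sizeof(*pu16), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16 += 1; /* Modify the mapped (or bounce buffered) copy. */
        /* The commit writes back any bounce buffer and releases the page mapping lock. */
        rcStrict = iemMemCommitAndUnmap(pIemCpu, pu16, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif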
7015
7016
7017/**
7018 * Commits the guest memory if bounce buffered and unmaps it.
7019 *
7020 * @returns Strict VBox status code.
7021 * @param pIemCpu The IEM per CPU data.
7022 * @param pvMem The mapping.
7023 * @param fAccess The kind of access.
7024 */
7025IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7026{
7027 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7028 AssertReturn(iMemMap >= 0, iMemMap);
7029
7030 /* If it's bounce buffered, we may need to write back the buffer. */
7031 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7032 {
7033 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7034 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7035 }
7036 /* Otherwise unlock it. */
7037 else
7038 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7039
7040 /* Free the entry. */
7041 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7042 Assert(pIemCpu->cActiveMappings != 0);
7043 pIemCpu->cActiveMappings--;
7044 return VINF_SUCCESS;
7045}
7046
7047
7048/**
7049 * Rolls back mappings, releasing page locks and such.
7050 *
7051 * The caller shall only call this after checking cActiveMappings.
7052 *
7053 * @returns Strict VBox status code to pass up.
7054 * @param pIemCpu The IEM per CPU data.
7055 */
7056IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7057{
7058 Assert(pIemCpu->cActiveMappings > 0);
7059
7060 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7061 while (iMemMap-- > 0)
7062 {
7063 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7064 if (fAccess != IEM_ACCESS_INVALID)
7065 {
7066 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7067 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7068 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7069 Assert(pIemCpu->cActiveMappings > 0);
7070 pIemCpu->cActiveMappings--;
7071 }
7072 }
7073}
7074
7075
7076/**
7077 * Fetches a data byte.
7078 *
7079 * @returns Strict VBox status code.
7080 * @param pIemCpu The IEM per CPU data.
7081 * @param pu8Dst Where to return the byte.
7082 * @param iSegReg The index of the segment register to use for
7083 * this access. The base and limits are checked.
7084 * @param GCPtrMem The address of the guest memory.
7085 */
7086IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7087{
7088 /* The lazy approach for now... */
7089 uint8_t const *pu8Src;
7090 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7091 if (rc == VINF_SUCCESS)
7092 {
7093 *pu8Dst = *pu8Src;
7094 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7095 }
7096 return rc;
7097}
7098
7099
7100/**
7101 * Fetches a data word.
7102 *
7103 * @returns Strict VBox status code.
7104 * @param pIemCpu The IEM per CPU data.
7105 * @param pu16Dst Where to return the word.
7106 * @param iSegReg The index of the segment register to use for
7107 * this access. The base and limits are checked.
7108 * @param GCPtrMem The address of the guest memory.
7109 */
7110IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7111{
7112 /* The lazy approach for now... */
7113 uint16_t const *pu16Src;
7114 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7115 if (rc == VINF_SUCCESS)
7116 {
7117 *pu16Dst = *pu16Src;
7118 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7119 }
7120 return rc;
7121}
7122
7123
7124/**
7125 * Fetches a data dword.
7126 *
7127 * @returns Strict VBox status code.
7128 * @param pIemCpu The IEM per CPU data.
7129 * @param pu32Dst Where to return the dword.
7130 * @param iSegReg The index of the segment register to use for
7131 * this access. The base and limits are checked.
7132 * @param GCPtrMem The address of the guest memory.
7133 */
7134IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7135{
7136 /* The lazy approach for now... */
7137 uint32_t const *pu32Src;
7138 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7139 if (rc == VINF_SUCCESS)
7140 {
7141 *pu32Dst = *pu32Src;
7142 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7143 }
7144 return rc;
7145}
7146
7147
7148#ifdef SOME_UNUSED_FUNCTION
7149/**
7150 * Fetches a data dword and sign extends it to a qword.
7151 *
7152 * @returns Strict VBox status code.
7153 * @param pIemCpu The IEM per CPU data.
7154 * @param pu64Dst Where to return the sign extended value.
7155 * @param iSegReg The index of the segment register to use for
7156 * this access. The base and limits are checked.
7157 * @param GCPtrMem The address of the guest memory.
7158 */
7159IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7160{
7161 /* The lazy approach for now... */
7162 int32_t const *pi32Src;
7163 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7164 if (rc == VINF_SUCCESS)
7165 {
7166 *pu64Dst = *pi32Src;
7167 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7168 }
7169#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7170 else
7171 *pu64Dst = 0;
7172#endif
7173 return rc;
7174}
7175#endif
7176
7177
7178/**
7179 * Fetches a data qword.
7180 *
7181 * @returns Strict VBox status code.
7182 * @param pIemCpu The IEM per CPU data.
7183 * @param pu64Dst Where to return the qword.
7184 * @param iSegReg The index of the segment register to use for
7185 * this access. The base and limits are checked.
7186 * @param GCPtrMem The address of the guest memory.
7187 */
7188IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7189{
7190 /* The lazy approach for now... */
7191 uint64_t const *pu64Src;
7192 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7193 if (rc == VINF_SUCCESS)
7194 {
7195 *pu64Dst = *pu64Src;
7196 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7197 }
7198 return rc;
7199}
7200
7201
7202/**
7203 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7204 *
7205 * @returns Strict VBox status code.
7206 * @param pIemCpu The IEM per CPU data.
7207 * @param pu64Dst Where to return the qword.
7208 * @param iSegReg The index of the segment register to use for
7209 * this access. The base and limits are checked.
7210 * @param GCPtrMem The address of the guest memory.
7211 */
7212IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7213{
7214 /* The lazy approach for now... */
7215 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7216 if (RT_UNLIKELY(GCPtrMem & 15))
7217 return iemRaiseGeneralProtectionFault0(pIemCpu);
7218
7219 uint64_t const *pu64Src;
7220 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7221 if (rc == VINF_SUCCESS)
7222 {
7223 *pu64Dst = *pu64Src;
7224 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7225 }
7226 return rc;
7227}
7228
7229
7230/**
7231 * Fetches a data tword.
7232 *
7233 * @returns Strict VBox status code.
7234 * @param pIemCpu The IEM per CPU data.
7235 * @param pr80Dst Where to return the tword.
7236 * @param iSegReg The index of the segment register to use for
7237 * this access. The base and limits are checked.
7238 * @param GCPtrMem The address of the guest memory.
7239 */
7240IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7241{
7242 /* The lazy approach for now... */
7243 PCRTFLOAT80U pr80Src;
7244 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7245 if (rc == VINF_SUCCESS)
7246 {
7247 *pr80Dst = *pr80Src;
7248 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7249 }
7250 return rc;
7251}
7252
7253
7254/**
7255 * Fetches a data dqword (double qword), generally SSE related.
7256 *
7257 * @returns Strict VBox status code.
7258 * @param pIemCpu The IEM per CPU data.
7259 * @param pu128Dst Where to return the dqword.
7260 * @param iSegReg The index of the segment register to use for
7261 * this access. The base and limits are checked.
7262 * @param GCPtrMem The address of the guest memory.
7263 */
7264IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7265{
7266 /* The lazy approach for now... */
7267 uint128_t const *pu128Src;
7268 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7269 if (rc == VINF_SUCCESS)
7270 {
7271 *pu128Dst = *pu128Src;
7272 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7273 }
7274 return rc;
7275}
7276
7277
7278/**
7279 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7280 * related.
7281 *
7282 * Raises \#GP(0) if not aligned.
7283 *
7284 * @returns Strict VBox status code.
7285 * @param pIemCpu The IEM per CPU data.
7286 * @param pu128Dst Where to return the dqword.
7287 * @param iSegReg The index of the segment register to use for
7288 * this access. The base and limits are checked.
7289 * @param GCPtrMem The address of the guest memory.
7290 */
7291IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7292{
7293 /* The lazy approach for now... */
7294 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7295 if ( (GCPtrMem & 15)
7296 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7297 return iemRaiseGeneralProtectionFault0(pIemCpu);
7298
7299 uint128_t const *pu128Src;
7300 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7301 if (rc == VINF_SUCCESS)
7302 {
7303 *pu128Dst = *pu128Src;
7304 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7305 }
7306 return rc;
7307}
7308
7309
7310
7311
7312/**
7313 * Fetches a descriptor register (lgdt, lidt).
7314 *
7315 * @returns Strict VBox status code.
7316 * @param pIemCpu The IEM per CPU data.
7317 * @param pcbLimit Where to return the limit.
7318 * @param pGCPtrBase Where to return the base.
7319 * @param iSegReg The index of the segment register to use for
7320 * this access. The base and limits are checked.
7321 * @param GCPtrMem The address of the guest memory.
7322 * @param enmOpSize The effective operand size.
7323 */
7324IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7325 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7326{
7327 uint8_t const *pu8Src;
7328 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7329 (void **)&pu8Src,
7330 enmOpSize == IEMMODE_64BIT
7331 ? 2 + 8
7332 : enmOpSize == IEMMODE_32BIT
7333 ? 2 + 4
7334 : 2 + 3,
7335 iSegReg,
7336 GCPtrMem,
7337 IEM_ACCESS_DATA_R);
7338 if (rcStrict == VINF_SUCCESS)
7339 {
7340 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7341 switch (enmOpSize)
7342 {
7343 case IEMMODE_16BIT:
7344 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7345 break;
7346 case IEMMODE_32BIT:
7347 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7348 break;
7349 case IEMMODE_64BIT:
7350 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7351 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7352 break;
7353
7354 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7355 }
7356 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7357 }
7358 return rcStrict;
7359}
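
/*
 * Editor note (illustrative, not part of the original source): the lgdt/lidt
 * memory image read above is a 16-bit limit followed by the base address;
 * 3, 4 or 8 base bytes are consumed for 16-, 32- and 64-bit operand sizes
 * respectively (bits 31:24 of the base are forced to zero in the 16-bit
 * case).  A hypothetical helper computing the number of bytes read:
 */
#if 0
static size_t iemExampleXdtrFetchSize(IEMMODE enmOpSize)
{
    return enmOpSize == IEMMODE_64BIT ? 2 + 8   /* limit + 64-bit base */
         : enmOpSize == IEMMODE_32BIT ? 2 + 4   /* limit + 32-bit base */
         :                              2 + 3;  /* limit + 24-bit base */
}
#endif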
7360
7361
7362
7363/**
7364 * Stores a data byte.
7365 *
7366 * @returns Strict VBox status code.
7367 * @param pIemCpu The IEM per CPU data.
7368 * @param iSegReg The index of the segment register to use for
7369 * this access. The base and limits are checked.
7370 * @param GCPtrMem The address of the guest memory.
7371 * @param u8Value The value to store.
7372 */
7373IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7374{
7375 /* The lazy approach for now... */
7376 uint8_t *pu8Dst;
7377 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7378 if (rc == VINF_SUCCESS)
7379 {
7380 *pu8Dst = u8Value;
7381 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7382 }
7383 return rc;
7384}
7385
7386
7387/**
7388 * Stores a data word.
7389 *
7390 * @returns Strict VBox status code.
7391 * @param pIemCpu The IEM per CPU data.
7392 * @param iSegReg The index of the segment register to use for
7393 * this access. The base and limits are checked.
7394 * @param GCPtrMem The address of the guest memory.
7395 * @param u16Value The value to store.
7396 */
7397IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7398{
7399 /* The lazy approach for now... */
7400 uint16_t *pu16Dst;
7401 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7402 if (rc == VINF_SUCCESS)
7403 {
7404 *pu16Dst = u16Value;
7405 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7406 }
7407 return rc;
7408}
7409
7410
7411/**
7412 * Stores a data dword.
7413 *
7414 * @returns Strict VBox status code.
7415 * @param pIemCpu The IEM per CPU data.
7416 * @param iSegReg The index of the segment register to use for
7417 * this access. The base and limits are checked.
7418 * @param GCPtrMem The address of the guest memory.
7419 * @param u32Value The value to store.
7420 */
7421IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7422{
7423 /* The lazy approach for now... */
7424 uint32_t *pu32Dst;
7425 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7426 if (rc == VINF_SUCCESS)
7427 {
7428 *pu32Dst = u32Value;
7429 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7430 }
7431 return rc;
7432}
7433
7434
7435/**
7436 * Stores a data qword.
7437 *
7438 * @returns Strict VBox status code.
7439 * @param pIemCpu The IEM per CPU data.
7440 * @param iSegReg The index of the segment register to use for
7441 * this access. The base and limits are checked.
7442 * @param GCPtrMem The address of the guest memory.
7443 * @param u64Value The value to store.
7444 */
7445IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7446{
7447 /* The lazy approach for now... */
7448 uint64_t *pu64Dst;
7449 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7450 if (rc == VINF_SUCCESS)
7451 {
7452 *pu64Dst = u64Value;
7453 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7454 }
7455 return rc;
7456}
7457
7458
7459/**
7460 * Stores a data dqword.
7461 *
7462 * @returns Strict VBox status code.
7463 * @param pIemCpu The IEM per CPU data.
7464 * @param iSegReg The index of the segment register to use for
7465 * this access. The base and limits are checked.
7466 * @param GCPtrMem The address of the guest memory.
7467 * @param u128Value The value to store.
7468 */
7469IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7470{
7471 /* The lazy approach for now... */
7472 uint128_t *pu128Dst;
7473 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7474 if (rc == VINF_SUCCESS)
7475 {
7476 *pu128Dst = u128Value;
7477 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7478 }
7479 return rc;
7480}
7481
7482
7483/**
7484 * Stores a data dqword, SSE aligned.
7485 *
7486 * @returns Strict VBox status code.
7487 * @param pIemCpu The IEM per CPU data.
7488 * @param iSegReg The index of the segment register to use for
7489 * this access. The base and limits are checked.
7490 * @param GCPtrMem The address of the guest memory.
7491 * @param u128Value The value to store.
7492 */
7493IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7494{
7495 /* The lazy approach for now... */
7496 if ( (GCPtrMem & 15)
7497 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7498 return iemRaiseGeneralProtectionFault0(pIemCpu);
7499
7500 uint128_t *pu128Dst;
7501 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7502 if (rc == VINF_SUCCESS)
7503 {
7504 *pu128Dst = u128Value;
7505 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7506 }
7507 return rc;
7508}
7509
7510
7511/**
7512 * Stores a descriptor register (sgdt, sidt).
7513 *
7514 * @returns Strict VBox status code.
7515 * @param pIemCpu The IEM per CPU data.
7516 * @param cbLimit The limit.
7517 * @param GCPtrBase The base address.
7518 * @param iSegReg The index of the segment register to use for
7519 * this access. The base and limits are checked.
7520 * @param GCPtrMem The address of the guest memory.
7521 * @param enmOpSize The effective operand size.
7522 */
7523IEM_STATIC VBOXSTRICTRC
7524iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7525{
7526 uint8_t *pu8Src;
7527 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7528 (void **)&pu8Src,
7529 enmOpSize == IEMMODE_64BIT
7530 ? 2 + 8
7531 : enmOpSize == IEMMODE_32BIT
7532 ? 2 + 4
7533 : 2 + 4, /* The 16-bit form also stores the 6th byte (set to zero below). */
7534 iSegReg,
7535 GCPtrMem,
7536 IEM_ACCESS_DATA_W);
7537 if (rcStrict == VINF_SUCCESS)
7538 {
7539 pu8Src[0] = RT_BYTE1(cbLimit);
7540 pu8Src[1] = RT_BYTE2(cbLimit);
7541 pu8Src[2] = RT_BYTE1(GCPtrBase);
7542 pu8Src[3] = RT_BYTE2(GCPtrBase);
7543 pu8Src[4] = RT_BYTE3(GCPtrBase);
7544 if (enmOpSize == IEMMODE_16BIT)
7545 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7546 else
7547 {
7548 pu8Src[5] = RT_BYTE4(GCPtrBase);
7549 if (enmOpSize == IEMMODE_64BIT)
7550 {
7551 pu8Src[6] = RT_BYTE5(GCPtrBase);
7552 pu8Src[7] = RT_BYTE6(GCPtrBase);
7553 pu8Src[8] = RT_BYTE7(GCPtrBase);
7554 pu8Src[9] = RT_BYTE8(GCPtrBase);
7555 }
7556 }
7557 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7558 }
7559 return rcStrict;
7560}
7561
7562
7563/**
7564 * Pushes a word onto the stack.
7565 *
7566 * @returns Strict VBox status code.
7567 * @param pIemCpu The IEM per CPU data.
7568 * @param u16Value The value to push.
7569 */
7570IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7571{
7572 /* Decrement the stack pointer. */
7573 uint64_t uNewRsp;
7574 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7575 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7576
7577 /* Write the word the lazy way. */
7578 uint16_t *pu16Dst;
7579 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7580 if (rc == VINF_SUCCESS)
7581 {
7582 *pu16Dst = u16Value;
7583 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7584 }
7585
7586 /* Commit the new RSP value unless an access handler made trouble. */
7587 if (rc == VINF_SUCCESS)
7588 pCtx->rsp = uNewRsp;
7589
7590 return rc;
7591}
7592
7593
7594/**
7595 * Pushes a dword onto the stack.
7596 *
7597 * @returns Strict VBox status code.
7598 * @param pIemCpu The IEM per CPU data.
7599 * @param u32Value The value to push.
7600 */
7601IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7602{
7603 /* Decrement the stack pointer. */
7604 uint64_t uNewRsp;
7605 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7606 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7607
7608 /* Write the dword the lazy way. */
7609 uint32_t *pu32Dst;
7610 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7611 if (rc == VINF_SUCCESS)
7612 {
7613 *pu32Dst = u32Value;
7614 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7615 }
7616
7617 /* Commit the new RSP value unless an access handler made trouble. */
7618 if (rc == VINF_SUCCESS)
7619 pCtx->rsp = uNewRsp;
7620
7621 return rc;
7622}
7623
7624
7625/**
7626 * Pushes a dword segment register value onto the stack.
7627 *
7628 * @returns Strict VBox status code.
7629 * @param pIemCpu The IEM per CPU data.
7630 * @param u32Value The value to push.
7631 */
7632IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7633{
7634 /* Decrement the stack pointer. */
7635 uint64_t uNewRsp;
7636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7637 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7638
7639 VBOXSTRICTRC rc;
7640 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7641 {
7642 /* The recompiler writes a full dword. */
7643 uint32_t *pu32Dst;
7644 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7645 if (rc == VINF_SUCCESS)
7646 {
7647 *pu32Dst = u32Value;
7648 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7649 }
7650 }
7651 else
7652 {
7653 /* The Intel docs talk about zero extending the selector register
7654 value. My actual Intel CPU here might be zero extending the value,
7655 but it still only writes the lower word... */
7656 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7657 * happens when crossing a page boundary, is the high word checked
7658 * for write accessibility or not? Probably it is. What about segment limits?
7659 * It appears this behavior is also shared with trap error codes.
7660 *
7661 * Docs indicate the behavior changed somewhere around the Pentium or Pentium Pro. Check
7662 * on ancient hardware to see when it actually did change. */
7663 uint16_t *pu16Dst;
7664 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7665 if (rc == VINF_SUCCESS)
7666 {
7667 *pu16Dst = (uint16_t)u32Value;
7668 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7669 }
7670 }
7671
7672 /* Commit the new RSP value unless an access handler made trouble. */
7673 if (rc == VINF_SUCCESS)
7674 pCtx->rsp = uNewRsp;
7675
7676 return rc;
7677}
7678
7679
7680/**
7681 * Pushes a qword onto the stack.
7682 *
7683 * @returns Strict VBox status code.
7684 * @param pIemCpu The IEM per CPU data.
7685 * @param u64Value The value to push.
7686 */
7687IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7688{
7689 /* Decrement the stack pointer. */
7690 uint64_t uNewRsp;
7691 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7692 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7693
7694 /* Write the qword the lazy way. */
7695 uint64_t *pu64Dst;
7696 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7697 if (rc == VINF_SUCCESS)
7698 {
7699 *pu64Dst = u64Value;
7700 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7701 }
7702
7703 /* Commit the new RSP value unless an access handler made trouble. */
7704 if (rc == VINF_SUCCESS)
7705 pCtx->rsp = uNewRsp;
7706
7707 return rc;
7708}
7709
7710
7711/**
7712 * Pops a word from the stack.
7713 *
7714 * @returns Strict VBox status code.
7715 * @param pIemCpu The IEM per CPU data.
7716 * @param pu16Value Where to store the popped value.
7717 */
7718IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7719{
7720 /* Increment the stack pointer. */
7721 uint64_t uNewRsp;
7722 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7723 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7724
7725 /* Fetch the word the lazy way. */
7726 uint16_t const *pu16Src;
7727 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7728 if (rc == VINF_SUCCESS)
7729 {
7730 *pu16Value = *pu16Src;
7731 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7732
7733 /* Commit the new RSP value. */
7734 if (rc == VINF_SUCCESS)
7735 pCtx->rsp = uNewRsp;
7736 }
7737
7738 return rc;
7739}
7740
7741
7742/**
7743 * Pops a dword from the stack.
7744 *
7745 * @returns Strict VBox status code.
7746 * @param pIemCpu The IEM per CPU data.
7747 * @param pu32Value Where to store the popped value.
7748 */
7749IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7750{
7751 /* Increment the stack pointer. */
7752 uint64_t uNewRsp;
7753 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7754 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7755
7756 /* Fetch the dword the lazy way. */
7757 uint32_t const *pu32Src;
7758 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7759 if (rc == VINF_SUCCESS)
7760 {
7761 *pu32Value = *pu32Src;
7762 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7763
7764 /* Commit the new RSP value. */
7765 if (rc == VINF_SUCCESS)
7766 pCtx->rsp = uNewRsp;
7767 }
7768
7769 return rc;
7770}
7771
7772
7773/**
7774 * Pops a qword from the stack.
7775 *
7776 * @returns Strict VBox status code.
7777 * @param pIemCpu The IEM per CPU data.
7778 * @param pu64Value Where to store the popped value.
7779 */
7780IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7781{
7782 /* Increment the stack pointer. */
7783 uint64_t uNewRsp;
7784 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7785 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7786
7787 /* Fetch the qword the lazy way. */
7788 uint64_t const *pu64Src;
7789 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7790 if (rc == VINF_SUCCESS)
7791 {
7792 *pu64Value = *pu64Src;
7793 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7794
7795 /* Commit the new RSP value. */
7796 if (rc == VINF_SUCCESS)
7797 pCtx->rsp = uNewRsp;
7798 }
7799
7800 return rc;
7801}
7802
7803
7804/**
7805 * Pushes a word onto the stack, using a temporary stack pointer.
7806 *
7807 * @returns Strict VBox status code.
7808 * @param pIemCpu The IEM per CPU data.
7809 * @param u16Value The value to push.
7810 * @param pTmpRsp Pointer to the temporary stack pointer.
7811 */
7812IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7813{
7814 /* Decrement the stack pointer. */
7815 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7816 RTUINT64U NewRsp = *pTmpRsp;
7817 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7818
7819 /* Write the word the lazy way. */
7820 uint16_t *pu16Dst;
7821 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7822 if (rc == VINF_SUCCESS)
7823 {
7824 *pu16Dst = u16Value;
7825 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7826 }
7827
7828 /* Commit the new RSP value unless an access handler made trouble. */
7829 if (rc == VINF_SUCCESS)
7830 *pTmpRsp = NewRsp;
7831
7832 return rc;
7833}
7834
7835
7836/**
7837 * Pushes a dword onto the stack, using a temporary stack pointer.
7838 *
7839 * @returns Strict VBox status code.
7840 * @param pIemCpu The IEM per CPU data.
7841 * @param u32Value The value to push.
7842 * @param pTmpRsp Pointer to the temporary stack pointer.
7843 */
7844IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7845{
7846 /* Decrement the stack pointer. */
7847 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7848 RTUINT64U NewRsp = *pTmpRsp;
7849 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7850
7851 /* Write the dword the lazy way. */
7852 uint32_t *pu32Dst;
7853 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7854 if (rc == VINF_SUCCESS)
7855 {
7856 *pu32Dst = u32Value;
7857 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7858 }
7859
7860 /* Commit the new RSP value unless an access handler made trouble. */
7861 if (rc == VINF_SUCCESS)
7862 *pTmpRsp = NewRsp;
7863
7864 return rc;
7865}
7866
7867
7868/**
7869 * Pushes a qword onto the stack, using a temporary stack pointer.
7870 *
7871 * @returns Strict VBox status code.
7872 * @param pIemCpu The IEM per CPU data.
7873 * @param u64Value The value to push.
7874 * @param pTmpRsp Pointer to the temporary stack pointer.
7875 */
7876IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7877{
7878 /* Decrement the stack pointer. */
7879 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7880 RTUINT64U NewRsp = *pTmpRsp;
7881 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7882
7883 /* Write the qword the lazy way. */
7884 uint64_t *pu64Dst;
7885 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7886 if (rc == VINF_SUCCESS)
7887 {
7888 *pu64Dst = u64Value;
7889 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7890 }
7891
7892 /* Commit the new RSP value unless an access handler made trouble. */
7893 if (rc == VINF_SUCCESS)
7894 *pTmpRsp = NewRsp;
7895
7896 return rc;
7897}
7898
7899
7900/**
7901 * Pops a word from the stack, using a temporary stack pointer.
7902 *
7903 * @returns Strict VBox status code.
7904 * @param pIemCpu The IEM per CPU data.
7905 * @param pu16Value Where to store the popped value.
7906 * @param pTmpRsp Pointer to the temporary stack pointer.
7907 */
7908IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7909{
7910 /* Increment the stack pointer. */
7911 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7912 RTUINT64U NewRsp = *pTmpRsp;
7913 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7914
7915 /* Fetch the word the lazy way. */
7916 uint16_t const *pu16Src;
7917 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7918 if (rc == VINF_SUCCESS)
7919 {
7920 *pu16Value = *pu16Src;
7921 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7922
7923 /* Commit the new RSP value. */
7924 if (rc == VINF_SUCCESS)
7925 *pTmpRsp = NewRsp;
7926 }
7927
7928 return rc;
7929}
7930
7931
7932/**
7933 * Pops a dword from the stack, using a temporary stack pointer.
7934 *
7935 * @returns Strict VBox status code.
7936 * @param pIemCpu The IEM per CPU data.
7937 * @param pu32Value Where to store the popped value.
7938 * @param pTmpRsp Pointer to the temporary stack pointer.
7939 */
7940IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7941{
7942 /* Increment the stack pointer. */
7943 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7944 RTUINT64U NewRsp = *pTmpRsp;
7945 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7946
7947 /* Fetch the dword the lazy way. */
7948 uint32_t const *pu32Src;
7949 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7950 if (rc == VINF_SUCCESS)
7951 {
7952 *pu32Value = *pu32Src;
7953 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7954
7955 /* Commit the new RSP value. */
7956 if (rc == VINF_SUCCESS)
7957 *pTmpRsp = NewRsp;
7958 }
7959
7960 return rc;
7961}
7962
7963
7964/**
7965 * Pops a qword from the stack, using a temporary stack pointer.
7966 *
7967 * @returns Strict VBox status code.
7968 * @param pIemCpu The IEM per CPU data.
7969 * @param pu64Value Where to store the popped value.
7970 * @param pTmpRsp Pointer to the temporary stack pointer.
7971 */
7972IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7973{
7974 /* Increment the stack pointer. */
7975 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7976 RTUINT64U NewRsp = *pTmpRsp;
7977 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7978
7979 /* Fetch the qword the lazy way. */
7980 uint64_t const *pu64Src;
7981 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7982 if (rcStrict == VINF_SUCCESS)
7983 {
7984 *pu64Value = *pu64Src;
7985 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7986
7987 /* Commit the new RSP value. */
7988 if (rcStrict == VINF_SUCCESS)
7989 *pTmpRsp = NewRsp;
7990 }
7991
7992 return rcStrict;
7993}
7994
7995
7996/**
7997 * Begin a special stack push (used by interrupts, exceptions and such).
7998 *
7999 * This will raise \#SS or \#PF if appropriate.
8000 *
8001 * @returns Strict VBox status code.
8002 * @param pIemCpu The IEM per CPU data.
8003 * @param cbMem The number of bytes to push onto the stack.
8004 * @param ppvMem Where to return the pointer to the stack memory.
8005 * As with the other memory functions this could be
8006 * direct access or bounce buffered access, so
8007 * don't commit the register until the commit call
8008 * succeeds.
8009 * @param puNewRsp Where to return the new RSP value. This must be
8010 * passed unchanged to
8011 * iemMemStackPushCommitSpecial().
8012 */
8013IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8014{
8015 Assert(cbMem < UINT8_MAX);
8016 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8017 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8018 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8019}
8020
8021
8022/**
8023 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8024 *
8025 * This will update the rSP.
8026 *
8027 * @returns Strict VBox status code.
8028 * @param pIemCpu The IEM per CPU data.
8029 * @param pvMem The pointer returned by
8030 * iemMemStackPushBeginSpecial().
8031 * @param uNewRsp The new RSP value returned by
8032 * iemMemStackPushBeginSpecial().
8033 */
8034IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8035{
8036 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8037 if (rcStrict == VINF_SUCCESS)
8038 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8039 return rcStrict;
8040}
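
/*
 * Illustrative sketch of how the special push pair above is meant to be used
 * (the 6-byte real-mode frame layout and the exact CPUMCTX field names are
 * assumptions made for the example only, not requirements of the API):
 *
 * @code
 *  PCPUMCTX     pCtx = pIemCpu->CTX_SUFF(pCtx);
 *  uint16_t    *pu16Frame;
 *  uint64_t     uNewRsp;
 *  VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *  if (rcStrict != VINF_SUCCESS)
 *      return rcStrict;
 *  pu16Frame[2] = (uint16_t)pCtx->eflags.u;
 *  pu16Frame[1] = pCtx->cs.Sel;
 *  pu16Frame[0] = pCtx->ip;
 *  rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 *  if (rcStrict != VINF_SUCCESS)
 *      return rcStrict;
 * @endcode
 *
 * Note that rSP is only updated by the commit call; a failure between begin
 * and commit leaves the guest stack pointer untouched.
 */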
8041
8042
8043/**
8044 * Begins a special stack pop (used by iret, retf and such).
8045 *
8046 * This will raise \#SS or \#PF if appropriate.
8047 *
8048 * @returns Strict VBox status code.
8049 * @param pIemCpu The IEM per CPU data.
8050 * @param   cbMem               The number of bytes to pop off the stack.
8051 * @param ppvMem Where to return the pointer to the stack memory.
8052 * @param puNewRsp Where to return the new RSP value. This must be
8053 * passed unchanged to
8054 * iemMemStackPopCommitSpecial() or applied
8055 * manually if iemMemStackPopDoneSpecial() is used.
8056 */
8057IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8058{
8059 Assert(cbMem < UINT8_MAX);
8060 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8061 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8062 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8063}
8064
8065
8066/**
8067 * Continues a special stack pop (used by iret and retf).
8068 *
8069 * This will raise \#SS or \#PF if appropriate.
8070 *
8071 * @returns Strict VBox status code.
8072 * @param pIemCpu The IEM per CPU data.
8073 * @param   cbMem               The number of bytes to pop off the stack.
8074 * @param ppvMem Where to return the pointer to the stack memory.
8075 * @param puNewRsp Where to return the new RSP value. This must be
8076 * passed unchanged to
8077 * iemMemStackPopCommitSpecial() or applied
8078 * manually if iemMemStackPopDoneSpecial() is used.
8079 */
8080IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8081{
8082 Assert(cbMem < UINT8_MAX);
8083 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8084 RTUINT64U NewRsp;
8085 NewRsp.u = *puNewRsp;
8086 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8087 *puNewRsp = NewRsp.u;
8088 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8089}
8090
8091
8092/**
8093 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8094 *
8095 * This will update the rSP.
8096 *
8097 * @returns Strict VBox status code.
8098 * @param pIemCpu The IEM per CPU data.
8099 * @param pvMem The pointer returned by
8100 * iemMemStackPopBeginSpecial().
8101 * @param uNewRsp The new RSP value returned by
8102 * iemMemStackPopBeginSpecial().
8103 */
8104IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8105{
8106 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8107 if (rcStrict == VINF_SUCCESS)
8108 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8109 return rcStrict;
8110}
8111
8112
8113/**
8114 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8115 * iemMemStackPopContinueSpecial).
8116 *
8117 * The caller will manually commit the rSP.
8118 *
8119 * @returns Strict VBox status code.
8120 * @param pIemCpu The IEM per CPU data.
8121 * @param pvMem The pointer returned by
8122 * iemMemStackPopBeginSpecial() or
8123 * iemMemStackPopContinueSpecial().
8124 */
8125IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8126{
8127 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8128}
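
/*
 * Illustrative sketch of the pop side (an iret-like flow); the 12-byte frame
 * layout and the selector validation step are assumptions of the example:
 *
 * @code
 *  uint32_t const *pu32Frame;
 *  uint64_t        uNewRsp;
 *  VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, (void const **)&pu32Frame, &uNewRsp);
 *  if (rcStrict != VINF_SUCCESS)
 *      return rcStrict;
 *  uint32_t uNewEip   = pu32Frame[0];
 *  uint16_t uNewCs    = (uint16_t)pu32Frame[1];
 *  uint32_t uNewFlags = pu32Frame[2];
 *  rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu32Frame);
 *  if (rcStrict != VINF_SUCCESS)
 *      return rcStrict;
 * @endcode
 *
 * When iemMemStackPopDoneSpecial is used as above, the caller applies uNewRsp
 * to the context itself once the new CS and mode have been validated;
 * iemMemStackPopCommitSpecial does that commit immediately instead.
 */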
8129
8130
8131/**
8132 * Fetches a system table byte.
8133 *
8134 * @returns Strict VBox status code.
8135 * @param pIemCpu The IEM per CPU data.
8136 * @param pbDst Where to return the byte.
8137 * @param iSegReg The index of the segment register to use for
8138 * this access. The base and limits are checked.
8139 * @param GCPtrMem The address of the guest memory.
8140 */
8141IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8142{
8143 /* The lazy approach for now... */
8144 uint8_t const *pbSrc;
8145 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8146 if (rc == VINF_SUCCESS)
8147 {
8148 *pbDst = *pbSrc;
8149 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8150 }
8151 return rc;
8152}
8153
8154
8155/**
8156 * Fetches a system table word.
8157 *
8158 * @returns Strict VBox status code.
8159 * @param pIemCpu The IEM per CPU data.
8160 * @param pu16Dst Where to return the word.
8161 * @param iSegReg The index of the segment register to use for
8162 * this access. The base and limits are checked.
8163 * @param GCPtrMem The address of the guest memory.
8164 */
8165IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8166{
8167 /* The lazy approach for now... */
8168 uint16_t const *pu16Src;
8169 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8170 if (rc == VINF_SUCCESS)
8171 {
8172 *pu16Dst = *pu16Src;
8173 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8174 }
8175 return rc;
8176}
8177
8178
8179/**
8180 * Fetches a system table dword.
8181 *
8182 * @returns Strict VBox status code.
8183 * @param pIemCpu The IEM per CPU data.
8184 * @param pu32Dst Where to return the dword.
8185 * @param iSegReg The index of the segment register to use for
8186 * this access. The base and limits are checked.
8187 * @param GCPtrMem The address of the guest memory.
8188 */
8189IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8190{
8191 /* The lazy approach for now... */
8192 uint32_t const *pu32Src;
8193 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8194 if (rc == VINF_SUCCESS)
8195 {
8196 *pu32Dst = *pu32Src;
8197 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8198 }
8199 return rc;
8200}
8201
8202
8203/**
8204 * Fetches a system table qword.
8205 *
8206 * @returns Strict VBox status code.
8207 * @param pIemCpu The IEM per CPU data.
8208 * @param pu64Dst Where to return the qword.
8209 * @param iSegReg The index of the segment register to use for
8210 * this access. The base and limits are checked.
8211 * @param GCPtrMem The address of the guest memory.
8212 */
8213IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8214{
8215 /* The lazy approach for now... */
8216 uint64_t const *pu64Src;
8217 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8218 if (rc == VINF_SUCCESS)
8219 {
8220 *pu64Dst = *pu64Src;
8221 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8222 }
8223 return rc;
8224}
8225
8226
8227/**
8228 * Fetches a descriptor table entry with caller specified error code.
8229 *
8230 * @returns Strict VBox status code.
8231 * @param pIemCpu The IEM per CPU.
8232 * @param pDesc Where to return the descriptor table entry.
8233 * @param uSel The selector which table entry to fetch.
8234 * @param uXcpt The exception to raise on table lookup error.
8235 * @param uErrorCode The error code associated with the exception.
8236 */
8237IEM_STATIC VBOXSTRICTRC
8238iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8239{
8240 AssertPtr(pDesc);
8241 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8242
8243 /** @todo did the 286 require all 8 bytes to be accessible? */
8244 /*
8245 * Get the selector table base and check bounds.
8246 */
8247 RTGCPTR GCPtrBase;
8248 if (uSel & X86_SEL_LDT)
8249 {
8250 if ( !pCtx->ldtr.Attr.n.u1Present
8251 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8252 {
8253 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8254 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8255 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8256 uErrorCode, 0);
8257 }
8258
8259 Assert(pCtx->ldtr.Attr.n.u1Present);
8260 GCPtrBase = pCtx->ldtr.u64Base;
8261 }
8262 else
8263 {
8264 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8265 {
8266 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8267 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8268 uErrorCode, 0);
8269 }
8270 GCPtrBase = pCtx->gdtr.pGdt;
8271 }
8272
8273 /*
8274 * Read the legacy descriptor and maybe the long mode extensions if
8275 * required.
8276 */
8277 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8278 if (rcStrict == VINF_SUCCESS)
8279 {
8280 if ( !IEM_IS_LONG_MODE(pIemCpu)
8281 || pDesc->Legacy.Gen.u1DescType)
8282 pDesc->Long.au64[1] = 0;
8283 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8284 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8285 else
8286 {
8287 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8288 /** @todo is this the right exception? */
8289 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8290 }
8291 }
8292 return rcStrict;
8293}
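
/*
 * Note on the long mode high-half fetch above: since X86_SEL_RPL_LDT covers
 * the low three selector bits, (uSel | X86_SEL_RPL_LDT) + 1 equals
 * (uSel & X86_SEL_MASK) + 8, i.e. the offset of the upper 8 bytes of the
 * 16-byte system descriptor.  Worked example: uSel=0x002b gives
 * (0x2b | 7) + 1 = 0x30 = 0x28 + 8.
 */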
8294
8295
8296/**
8297 * Fetches a descriptor table entry.
8298 *
8299 * @returns Strict VBox status code.
8300 * @param pIemCpu The IEM per CPU.
8301 * @param pDesc Where to return the descriptor table entry.
8302 * @param uSel The selector which table entry to fetch.
8303 * @param uXcpt The exception to raise on table lookup error.
8304 */
8305IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8306{
8307 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8308}
8309
8310
8311/**
8312 * Fakes a long mode stack selector for SS = 0.
8313 *
8314 * @param pDescSs Where to return the fake stack descriptor.
8315 * @param uDpl The DPL we want.
8316 */
8317IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8318{
8319 pDescSs->Long.au64[0] = 0;
8320 pDescSs->Long.au64[1] = 0;
8321 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8322 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8323 pDescSs->Long.Gen.u2Dpl = uDpl;
8324 pDescSs->Long.Gen.u1Present = 1;
8325 pDescSs->Long.Gen.u1Long = 1;
8326}
8327
8328
8329/**
8330 * Marks the selector descriptor as accessed (only non-system descriptors).
8331 *
8332 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8333 * will therefore skip the limit checks.
8334 *
8335 * @returns Strict VBox status code.
8336 * @param pIemCpu The IEM per CPU.
8337 * @param uSel The selector.
8338 */
8339IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8340{
8341 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8342
8343 /*
8344 * Get the selector table base and calculate the entry address.
8345 */
8346 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8347 ? pCtx->ldtr.u64Base
8348 : pCtx->gdtr.pGdt;
8349 GCPtr += uSel & X86_SEL_MASK;
8350
8351 /*
8352 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8353     * ugly stuff to avoid this.  This ensures the access is atomic and more or
8354     * less removes any question about whether it is an 8-bit or a 32-bit access.
8355 */
8356 VBOXSTRICTRC rcStrict;
8357 uint32_t volatile *pu32;
8358 if ((GCPtr & 3) == 0)
8359 {
8360        /* The normal case: map the 32 bits around the accessed bit (bit 40). */
8361 GCPtr += 2 + 2;
8362 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8363 if (rcStrict != VINF_SUCCESS)
8364 return rcStrict;
8365        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8366 }
8367 else
8368 {
8369 /* The misaligned GDT/LDT case, map the whole thing. */
8370 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8371 if (rcStrict != VINF_SUCCESS)
8372 return rcStrict;
8373 switch ((uintptr_t)pu32 & 3)
8374 {
8375 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8376 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8377 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8378 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8379 }
8380 }
8381
8382 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8383}
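
/*
 * Worked example for the aligned path above (values are illustrative): a GDT
 * selector 0x0023 (index 4, RPL 3) puts the 8-byte descriptor at
 * gdtr.pGdt + 0x20.  With a dword aligned table the code maps the dword at
 * +0x24 and sets bit 8 of it, which is descriptor bit 40, i.e. the
 * X86_SEL_TYPE_ACCESSED bit in the type field.
 */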
8384
8385/** @} */
8386
8387
8388/*
8389 * Include the C/C++ implementation of instruction.
8390 */
8391#include "IEMAllCImpl.cpp.h"
8392
8393
8394
8395/** @name "Microcode" macros.
8396 *
8397 * The idea is that we should be able to use the same code to interpret
8398 * instructions as well as recompiler instructions. Thus this obfuscation.
8399 *
8400 * @{
8401 */
8402#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8403#define IEM_MC_END() }
8404#define IEM_MC_PAUSE() do {} while (0)
8405#define IEM_MC_CONTINUE() do {} while (0)
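
/*
 * To give an idea of how these blocks read in the opcode decoder functions
 * (elsewhere in IEM), here is a sketch of a register-form 32-bit binary
 * operation; the iemAImpl_add_u32 worker and the X86_GREG_xAX / X86_GREG_xCX
 * register indexes are merely illustrative, not taken from this file:
 *
 * @code
 *  IEM_MC_BEGIN(3, 0);
 *  IEM_MC_ARG(uint32_t *,  pu32Dst, 0);
 *  IEM_MC_ARG(uint32_t,    u32Src,  1);
 *  IEM_MC_ARG(uint32_t *,  pEFlags, 2);
 *  IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
 *  IEM_MC_FETCH_GREG_U32(u32Src, X86_GREG_xCX);
 *  IEM_MC_REF_EFLAGS(pEFlags);
 *  IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *  IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xAX);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */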
8406
8407/** Internal macro. */
8408#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8409 do \
8410 { \
8411 VBOXSTRICTRC rcStrict2 = a_Expr; \
8412 if (rcStrict2 != VINF_SUCCESS) \
8413 return rcStrict2; \
8414 } while (0)
8415
8416#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8417#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8418#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8419#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8420#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8421#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8422#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8423
8424#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8425#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8426 do { \
8427 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8428 return iemRaiseDeviceNotAvailable(pIemCpu); \
8429 } while (0)
8430#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8431 do { \
8432 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8433 return iemRaiseMathFault(pIemCpu); \
8434 } while (0)
8435#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8436 do { \
8437 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8438 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8439 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8440 return iemRaiseUndefinedOpcode(pIemCpu); \
8441 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8442 return iemRaiseDeviceNotAvailable(pIemCpu); \
8443 } while (0)
8444#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8445 do { \
8446 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8447 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8448 return iemRaiseUndefinedOpcode(pIemCpu); \
8449 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8450 return iemRaiseDeviceNotAvailable(pIemCpu); \
8451 } while (0)
8452#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8453 do { \
8454 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8455 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8456 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8457 return iemRaiseUndefinedOpcode(pIemCpu); \
8458 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8459 return iemRaiseDeviceNotAvailable(pIemCpu); \
8460 } while (0)
8461#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8462 do { \
8463 if (pIemCpu->uCpl != 0) \
8464 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8465 } while (0)
8466
8467
8468#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8469#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8470#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8471#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8472#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8473#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8474#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8475 uint32_t a_Name; \
8476 uint32_t *a_pName = &a_Name
8477#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8478 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8479
8480#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8481#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8482
8483#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8484#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8485#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8486#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8487#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8488#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8489#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8490#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8491#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8492#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8493#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8494#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8495#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8496#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8497#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8498#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8499#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8500#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8501#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8502#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8503#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8504#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8505#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8506#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8507#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8508#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8509#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8510#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8511#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8512/** @note Not for IOPL or IF testing or modification. */
8513#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8514#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8515#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8516#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8517
8518#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8519#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8520#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8521#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8522#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8523#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8524#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8525#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8526#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8527#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8528#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8529 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8530
8531#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8532#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8533/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8534 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8535#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8536#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8537/** @note Not for IOPL or IF testing or modification. */
8538#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8539
8540#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8541#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8542#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8543 do { \
8544 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8545 *pu32Reg += (a_u32Value); \
8546        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8547 } while (0)
8548#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8549
8550#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8551#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8552#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8553 do { \
8554 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8555 *pu32Reg -= (a_u32Value); \
8556        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8557 } while (0)
8558#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8559
8560#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8561#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8562#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8563#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8564#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8565#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8566#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8567
8568#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8569#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8570#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8571#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8572
8573#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8574#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8575#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8576
8577#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8578#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8579
8580#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8581#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8582#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8583
8584#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8585#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8586#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8587
8588#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8589
8590#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8591
8592#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8593#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8594#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8595 do { \
8596 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8597 *pu32Reg &= (a_u32Value); \
8598        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8599 } while (0)
8600#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8601
8602#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8603#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8604#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8605 do { \
8606 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8607 *pu32Reg |= (a_u32Value); \
8608        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8609 } while (0)
8610#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8611
8612
8613/** @note Not for IOPL or IF modification. */
8614#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8615/** @note Not for IOPL or IF modification. */
8616#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8617/** @note Not for IOPL or IF modification. */
8618#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8619
8620#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8621
8622
8623#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8624 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8625#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8626 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8627#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8628 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8629#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8630 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8631#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8632 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8633#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8634 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8635#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8636 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8637
8638#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8639 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8640#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8641 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8642#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8643 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8644#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8645 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8646#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8647 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8648 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8649 } while (0)
8650#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8651 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8652 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8653 } while (0)
8654#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8655 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8656#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8657 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8658#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8659 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8660
8661#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8662 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8663#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8664 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8665#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8666 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8667
8668#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8669 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8670#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8671 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8672#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8673 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8674
8675#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8677#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8678 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8679#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8680 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8681
8682#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8684
8685#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8687#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8688 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8689#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8691#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8693
8694#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8696#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8698#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8700
8701#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8703#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8705
8706
8707
8708#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8709 do { \
8710 uint8_t u8Tmp; \
8711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8712 (a_u16Dst) = u8Tmp; \
8713 } while (0)
8714#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8715 do { \
8716 uint8_t u8Tmp; \
8717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8718 (a_u32Dst) = u8Tmp; \
8719 } while (0)
8720#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8721 do { \
8722 uint8_t u8Tmp; \
8723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8724 (a_u64Dst) = u8Tmp; \
8725 } while (0)
8726#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8727 do { \
8728 uint16_t u16Tmp; \
8729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8730 (a_u32Dst) = u16Tmp; \
8731 } while (0)
8732#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8733 do { \
8734 uint16_t u16Tmp; \
8735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8736 (a_u64Dst) = u16Tmp; \
8737 } while (0)
8738#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8739 do { \
8740 uint32_t u32Tmp; \
8741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8742 (a_u64Dst) = u32Tmp; \
8743 } while (0)
8744
8745#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8746 do { \
8747 uint8_t u8Tmp; \
8748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8749 (a_u16Dst) = (int8_t)u8Tmp; \
8750 } while (0)
8751#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8752 do { \
8753 uint8_t u8Tmp; \
8754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8755 (a_u32Dst) = (int8_t)u8Tmp; \
8756 } while (0)
8757#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8758 do { \
8759 uint8_t u8Tmp; \
8760 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8761 (a_u64Dst) = (int8_t)u8Tmp; \
8762 } while (0)
8763#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8764 do { \
8765 uint16_t u16Tmp; \
8766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8767 (a_u32Dst) = (int16_t)u16Tmp; \
8768 } while (0)
8769#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8770 do { \
8771 uint16_t u16Tmp; \
8772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8773 (a_u64Dst) = (int16_t)u16Tmp; \
8774 } while (0)
8775#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8776 do { \
8777 uint32_t u32Tmp; \
8778 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8779 (a_u64Dst) = (int32_t)u32Tmp; \
8780 } while (0)
8781
8782#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8783 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8784#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8785 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8786#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8787 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8788#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8789 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8790
8791#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8792 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8793#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8794 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8795#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8796 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8797#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8798 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8799
8800#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8801#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8802#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8803#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8804#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8805#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8806#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8807 do { \
8808 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8809 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8810 } while (0)
8811
8812#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8813 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8814#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8815 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8816
8817
8818#define IEM_MC_PUSH_U16(a_u16Value) \
8819 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8820#define IEM_MC_PUSH_U32(a_u32Value) \
8821 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8822#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8823 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8824#define IEM_MC_PUSH_U64(a_u64Value) \
8825 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8826
8827#define IEM_MC_POP_U16(a_pu16Value) \
8828 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8829#define IEM_MC_POP_U32(a_pu32Value) \
8830 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8831#define IEM_MC_POP_U64(a_pu64Value) \
8832 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8833
8834/** Maps guest memory for direct or bounce buffered access.
8835 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8836 * @remarks May return.
8837 */
8838#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8839 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8840
8841/** Maps guest memory for direct or bounce buffered access.
8842 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8843 * @remarks May return.
8844 */
8845#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8846 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8847
8848/** Commits the memory and unmaps the guest memory.
8849 * @remarks May return.
8850 */
8851#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8852 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
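
/*
 * Sketch of the usual read-modify-write pattern built on the two macros above;
 * IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, bRm, X86_GREG_xAX and the
 * iemAImpl_or_u16 worker are assumptions of the example, not defined here:
 *
 * @code
 *  IEM_MC_BEGIN(3, 2);
 *  IEM_MC_ARG(uint16_t *,      pu16Dst,          0);
 *  IEM_MC_ARG(uint16_t,        u16Src,           1);
 *  IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,  2);
 *  IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
 *  IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *  IEM_MC_FETCH_GREG_U16(u16Src, X86_GREG_xAX);
 *  IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *  IEM_MC_FETCH_EFLAGS(EFlags);
 *  IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_or_u16, pu16Dst, u16Src, pEFlags);
 *  IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *  IEM_MC_COMMIT_EFLAGS(EFlags);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */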
8853
8854/** Commits the memory and unmaps the guest memory unless the FPU status word
8855 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8856 * would cause the FPU instruction not to store its result.
8857 *
8858 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8859 * store, while \#P will not.
8860 *
8861 * @remarks May in theory return - for now.
8862 */
8863#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8864 do { \
8865 if ( !(a_u16FSW & X86_FSW_ES) \
8866 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8867 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8868 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8869 } while (0)
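
/*
 * Illustrative reading of the condition above: if FSW.ES is clear there is
 * nothing pending and the store is committed; if it is set, the store is still
 * committed when every one of the UE/OE/IE flags raised in @a a_u16FSW is
 * masked in the FPU control word (the X86_FCW_MASK_ALL bits).
 */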
8870
8871/** Calculate efficient address from R/M. */
8872#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8873 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8874
8875#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8876#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8877#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8878#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8879#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8880#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8881#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8882
8883/**
8884 * Defers the rest of the instruction emulation to a C implementation routine
8885 * and returns, only taking the standard parameters.
8886 *
8887 * @param a_pfnCImpl The pointer to the C routine.
8888 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8889 */
8890#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8891
8892/**
8893 * Defers the rest of the instruction emulation to a C implementation routine and
8894 * returns, taking one argument in addition to the standard ones.
8895 *
8896 * @param a_pfnCImpl The pointer to the C routine.
8897 * @param a0 The argument.
8898 */
8899#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8900
8901/**
8902 * Defers the rest of the instruction emulation to a C implementation routine
8903 * and returns, taking two arguments in addition to the standard ones.
8904 *
8905 * @param a_pfnCImpl The pointer to the C routine.
8906 * @param a0 The first extra argument.
8907 * @param a1 The second extra argument.
8908 */
8909#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8910
8911/**
8912 * Defers the rest of the instruction emulation to a C implementation routine
8913 * and returns, taking three arguments in addition to the standard ones.
8914 *
8915 * @param a_pfnCImpl The pointer to the C routine.
8916 * @param a0 The first extra argument.
8917 * @param a1 The second extra argument.
8918 * @param a2 The third extra argument.
8919 */
8920#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8921
8922/**
8923 * Defers the rest of the instruction emulation to a C implementation routine
8924 * and returns, taking four arguments in addition to the standard ones.
8925 *
8926 * @param a_pfnCImpl The pointer to the C routine.
8927 * @param a0 The first extra argument.
8928 * @param a1 The second extra argument.
8929 * @param a2 The third extra argument.
8930 * @param a3 The fourth extra argument.
8931 */
8932#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8933
8934/**
8935 * Defers the rest of the instruction emulation to a C implementation routine
8936 * and returns, taking five arguments in addition to the standard ones.
8937 *
8938 * @param a_pfnCImpl The pointer to the C routine.
8939 * @param a0 The first extra argument.
8940 * @param a1 The second extra argument.
8941 * @param a2 The third extra argument.
8942 * @param a3 The fourth extra argument.
8943 * @param a4 The fifth extra argument.
8944 */
8945#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
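
/*
 * Sketch of a decoder body deferring the tail of an instruction to a C
 * implementation routine via IEM_MC_CALL_CIMPL_2; iemCImpl_load_SReg, the
 * iSegReg/bRm decoder variables and X86_MODRM_RM_MASK are assumptions of the
 * example:
 *
 * @code
 *  IEM_MC_BEGIN(2, 0);
 *  IEM_MC_ARG_CONST(uint8_t,   iSRegArg, iSegReg, 0);
 *  IEM_MC_ARG(uint16_t,        u16Value,          1);
 *  IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK));
 *  IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
 *  IEM_MC_END();
 * @endcode
 *
 * The CALL_CIMPL macros return straight out of the enclosing function, so
 * nothing after them in the block is executed.
 */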
8946
8947/**
8948 * Defers the entire instruction emulation to a C implementation routine and
8949 * returns, only taking the standard parameters.
8950 *
8951 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8952 *
8953 * @param a_pfnCImpl The pointer to the C routine.
8954 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8955 */
8956#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8957
8958/**
8959 * Defers the entire instruction emulation to a C implementation routine and
8960 * returns, taking one argument in addition to the standard ones.
8961 *
8962 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8963 *
8964 * @param a_pfnCImpl The pointer to the C routine.
8965 * @param a0 The argument.
8966 */
8967#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8968
8969/**
8970 * Defers the entire instruction emulation to a C implementation routine and
8971 * returns, taking two arguments in addition to the standard ones.
8972 *
8973 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8974 *
8975 * @param a_pfnCImpl The pointer to the C routine.
8976 * @param a0 The first extra argument.
8977 * @param a1 The second extra argument.
8978 */
8979#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8980
8981/**
8982 * Defers the entire instruction emulation to a C implementation routine and
8983 * returns, taking three arguments in addition to the standard ones.
8984 *
8985 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8986 *
8987 * @param a_pfnCImpl The pointer to the C routine.
8988 * @param a0 The first extra argument.
8989 * @param a1 The second extra argument.
8990 * @param a2 The third extra argument.
8991 */
8992#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
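
/*
 * A deferral, by contrast, is the whole opcode handler on its own, with no
 * IEM_MC_BEGIN/IEM_MC_END around it; iemCImpl_hlt is just an illustrative
 * routine name here:
 *
 * @code
 *  return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 * @endcode
 */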
8993
8994/**
8995 * Calls a FPU assembly implementation taking one visible argument.
8996 *
8997 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8998 * @param a0 The first extra argument.
8999 */
9000#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
9001 do { \
9002 iemFpuPrepareUsage(pIemCpu); \
9003 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9004 } while (0)
9005
9006/**
9007 * Calls a FPU assembly implementation taking two visible arguments.
9008 *
9009 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9010 * @param a0 The first extra argument.
9011 * @param a1 The second extra argument.
9012 */
9013#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9014 do { \
9015 iemFpuPrepareUsage(pIemCpu); \
9016 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9017 } while (0)
9018
9019/**
9020 * Calls a FPU assembly implementation taking three visible arguments.
9021 *
9022 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9023 * @param a0 The first extra argument.
9024 * @param a1 The second extra argument.
9025 * @param a2 The third extra argument.
9026 */
9027#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9028 do { \
9029 iemFpuPrepareUsage(pIemCpu); \
9030 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9031 } while (0)
9032
9033#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9034 do { \
9035 (a_FpuData).FSW = (a_FSW); \
9036 (a_FpuData).r80Result = *(a_pr80Value); \
9037 } while (0)
9038
9039/** Pushes FPU result onto the stack. */
9040#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9041 iemFpuPushResult(pIemCpu, &a_FpuData)
9042/** Pushes FPU result onto the stack and sets the FPUDP. */
9043#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9044 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9045
9046/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
9047#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9048 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9049
9050/** Stores FPU result in a stack register. */
9051#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9052 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9053/** Stores FPU result in a stack register and pops the stack. */
9054#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9055 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9056/** Stores FPU result in a stack register and sets the FPUDP. */
9057#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9058 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9059/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9060 * stack. */
9061#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9062 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9063
9064/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9065#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9066 iemFpuUpdateOpcodeAndIp(pIemCpu)
9067/** Free a stack register (for FFREE and FFREEP). */
9068#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9069 iemFpuStackFree(pIemCpu, a_iStReg)
9070/** Increment the FPU stack pointer. */
9071#define IEM_MC_FPU_STACK_INC_TOP() \
9072 iemFpuStackIncTop(pIemCpu)
9073/** Decrement the FPU stack pointer. */
9074#define IEM_MC_FPU_STACK_DEC_TOP() \
9075 iemFpuStackDecTop(pIemCpu)
9076
9077/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9078#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9079 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9080/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9081#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9082 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9083/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9084#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9085 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9086/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9087#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9088 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9089/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9090 * stack. */
9091#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9092 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9093/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9094#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9095    iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9096
9097/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9098#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9099 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9100/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9101 * stack. */
9102#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9103 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9104/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9105 * FPUDS. */
9106#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9107 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9108/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9109 * FPUDS. Pops stack. */
9110#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9111 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9112/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9113 * stack twice. */
9114#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9115 iemFpuStackUnderflowThenPopPop(pIemCpu)
9116/** Raises a FPU stack underflow exception for an instruction pushing a result
9117 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9118#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9119 iemFpuStackPushUnderflow(pIemCpu)
9120/** Raises a FPU stack underflow exception for an instruction pushing a result
9121 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9122#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9123 iemFpuStackPushUnderflowTwo(pIemCpu)
9124
9125/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9126 * FPUIP, FPUCS and FOP. */
9127#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9128 iemFpuStackPushOverflow(pIemCpu)
9129/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9130 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9131#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9132 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9133/** Indicates that we (might) have modified the FPU state. */
9134#define IEM_MC_USED_FPU() \
9135 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
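
/*
 * Sketch of how the FPU result macros above combine in a memory-operand
 * instruction along the lines of fadd m32real; the RT floating point types
 * (RTFLOAT32U, PCRTFLOAT80U), IEMFPURESULT, the iemAImpl_fadd_r80_by_r32
 * worker, bRm, pIemCpu->iEffSeg and the IEM_MC_ELSE/IEM_MC_ENDIF closers
 * defined further down are assumptions of the example:
 *
 * @code
 *  IEM_MC_BEGIN(3, 3);
 *  IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
 *  IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *  IEM_MC_LOCAL(RTFLOAT32U,            r32Val2);
 *  IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes,  0);
 *  IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,          1);
 *  IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val2,   r32Val2, 2);
 *  IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *  IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *  IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *  IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
 *  IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
 *      IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r32, pFpuRes, pr80Value1, pr32Val2);
 *      IEM_MC_STORE_FPU_RESULT_MEM_OP(FpuRes, 0, pIemCpu->iEffSeg, GCPtrEffSrc);
 *  IEM_MC_ELSE()
 *      IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(0, pIemCpu->iEffSeg, GCPtrEffSrc);
 *  IEM_MC_ENDIF();
 *  IEM_MC_USED_FPU();
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */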
9136
9137/**
9138 * Calls a MMX assembly implementation taking two visible arguments.
9139 *
9140 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9141 * @param a0 The first extra argument.
9142 * @param a1 The second extra argument.
9143 */
9144#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9145 do { \
9146 iemFpuPrepareUsage(pIemCpu); \
9147 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9148 } while (0)
9149
9150/**
9151 * Calls a MMX assembly implementation taking three visible arguments.
9152 *
9153 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9154 * @param a0 The first extra argument.
9155 * @param a1 The second extra argument.
9156 * @param a2 The third extra argument.
9157 */
9158#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9159 do { \
9160 iemFpuPrepareUsage(pIemCpu); \
9161 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9162 } while (0)
9163
9164
9165/**
9166 * Calls a SSE assembly implementation taking two visible arguments.
9167 *
9168 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9169 * @param a0 The first extra argument.
9170 * @param a1 The second extra argument.
9171 */
9172#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9173 do { \
9174 iemFpuPrepareUsageSse(pIemCpu); \
9175 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9176 } while (0)
9177
9178/**
9179 * Calls a SSE assembly implementation taking three visible arguments.
9180 *
9181 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9182 * @param a0 The first extra argument.
9183 * @param a1 The second extra argument.
9184 * @param a2 The third extra argument.
9185 */
9186#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9187 do { \
9188 iemFpuPrepareUsageSse(pIemCpu); \
9189 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9190 } while (0)
9191
9192
9193/** @note Not for IOPL or IF testing. */
9194#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9195/** @note Not for IOPL or IF testing. */
9196#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9197/** @note Not for IOPL or IF testing. */
9198#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9199/** @note Not for IOPL or IF testing. */
9200#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9201/** @note Not for IOPL or IF testing. */
9202#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9203 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9204 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9205/** @note Not for IOPL or IF testing. */
9206#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9207 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9208 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9209/** @note Not for IOPL or IF testing. */
9210#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9211 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9212 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9213 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9214/** @note Not for IOPL or IF testing. */
9215#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9216 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9217 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9218 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9219#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9220#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9221#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9222/** @note Not for IOPL or IF testing. */
9223#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9224 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9225 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9226/** @note Not for IOPL or IF testing. */
9227#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9228 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9229 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9230/** @note Not for IOPL or IF testing. */
9231#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9232 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9233 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9234/** @note Not for IOPL or IF testing. */
9235#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9236 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9237 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9238/** @note Not for IOPL or IF testing. */
9239#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9240 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9241 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9242/** @note Not for IOPL or IF testing. */
9243#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9244 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9245 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9246#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9247#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9248#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9249 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9250#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9251 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9252#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9253 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9254#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9255 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9256#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9257 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9258#define IEM_MC_IF_FCW_IM() \
9259 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9260
9261#define IEM_MC_ELSE() } else {
9262#define IEM_MC_ENDIF() } do {} while (0)
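/* Illustrative sketch only: the IF/ELSE/ENDIF macros above expand to plain C
 * blocks testing the guest EFLAGS, so a hypothetical conditional body reads like
 * structured code.  The operand name i8Imm is made up and IEM_MC_REL_JMP_S8 /
 * IEM_MC_ADVANCE_RIP are recalled from the surrounding machinery:
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 */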
9263
9264/** @} */
9265
9266
9267/** @name Opcode Debug Helpers.
9268 * @{
9269 */
9270#ifdef DEBUG
9271# define IEMOP_MNEMONIC(a_szMnemonic) \
9272 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9273 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9274# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9275 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9276 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9277#else
9278# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9279# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9280#endif
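/* Typical use (illustrative): placed at the top of an opcode decoder function so
 * that level 4 logging shows the mnemonic together with CS:RIP, any lock prefix
 * and the instruction count, e.g. IEMOP_MNEMONIC2("add", "Eb,Gb"). */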
9281
9282/** @} */
9283
9284
9285/** @name Opcode Helpers.
9286 * @{
9287 */
9288
9289/** The instruction raises an \#UD in real and V8086 mode. */
9290#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9291 do \
9292 { \
9293 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9294 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9295 } while (0)
9296
9297/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9298 * lock prefixed.
9299 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9300#define IEMOP_HLP_NO_LOCK_PREFIX() \
9301 do \
9302 { \
9303 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9304 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9305 } while (0)
9306
9307/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9308 * 64-bit mode. */
9309#define IEMOP_HLP_NO_64BIT() \
9310 do \
9311 { \
9312 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9313 return IEMOP_RAISE_INVALID_OPCODE(); \
9314 } while (0)
9315
9316/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9317 * 64-bit mode. */
9318#define IEMOP_HLP_ONLY_64BIT() \
9319 do \
9320 { \
9321 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9322 return IEMOP_RAISE_INVALID_OPCODE(); \
9323 } while (0)
9324
9325/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9326#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9327 do \
9328 { \
9329 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9330 iemRecalEffOpSize64Default(pIemCpu); \
9331 } while (0)
9332
9333/** The instruction has 64-bit operand size if 64-bit mode. */
9334#define IEMOP_HLP_64BIT_OP_SIZE() \
9335 do \
9336 { \
9337 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9338 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9339 } while (0)
9340
9341/** Only a REX prefix immediately preceding the first opcode byte takes
9342 * effect. This macro helps ensure this and logs bad guest code. */
9343#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9344 do \
9345 { \
9346 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9347 { \
9348 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9349 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9350 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9351 pIemCpu->uRexB = 0; \
9352 pIemCpu->uRexIndex = 0; \
9353 pIemCpu->uRexReg = 0; \
9354 iemRecalEffOpSize(pIemCpu); \
9355 } \
9356 } while (0)
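/* Example of the architectural rule (illustrative bytes, not observed guest
 * code): in the sequence 48 66 0f b7 c0 the REX.W prefix (48) is followed by
 * another prefix (66), so it no longer immediately precedes the opcode and is
 * ignored; decoding the 66 triggers the macro above, which drops the recorded
 * REX bits and recalculates the effective operand size. */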
9357
9358/**
9359 * Done decoding.
9360 */
9361#define IEMOP_HLP_DONE_DECODING() \
9362 do \
9363 { \
9364 /*nothing for now, maybe later... */ \
9365 } while (0)
9366
9367/**
9368 * Done decoding, raise \#UD exception if lock prefix present.
9369 */
9370#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9371 do \
9372 { \
9373 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9374 { /* likely */ } \
9375 else \
9376 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9377 } while (0)
9378#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9379 do \
9380 { \
9381 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9382 { /* likely */ } \
9383 else \
9384 { \
9385 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9386 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9387 } \
9388 } while (0)
9389#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9390 do \
9391 { \
9392 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9393 { /* likely */ } \
9394 else \
9395 { \
9396 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9397 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9398 } \
9399 } while (0)
9400/**
9401 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9402 * are present.
9403 */
9404#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9405 do \
9406 { \
9407 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9408 { /* likely */ } \
9409 else \
9410 return IEMOP_RAISE_INVALID_OPCODE(); \
9411 } while (0)
9412
9413
9414/**
9415 * Calculates the effective address of a ModR/M memory operand.
9416 *
9417 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9418 *
9419 * @return Strict VBox status code.
9420 * @param pIemCpu The IEM per CPU data.
9421 * @param bRm The ModRM byte.
9422 * @param cbImm The size of any immediate following the
9423 * effective address opcode bytes. Important for
9424 * RIP relative addressing.
9425 * @param pGCPtrEff Where to return the effective address.
9426 */
9427IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9428{
9429 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9430 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9431#define SET_SS_DEF() \
9432 do \
9433 { \
9434 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9435 pIemCpu->iEffSeg = X86_SREG_SS; \
9436 } while (0)
9437
9438 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9439 {
9440/** @todo Check the effective address size crap! */
9441 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9442 {
9443 uint16_t u16EffAddr;
9444
9445 /* Handle the disp16 form with no registers first. */
9446 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9447 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9448 else
9449 {
9450                /* Get the displacement. */
9451 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9452 {
9453 case 0: u16EffAddr = 0; break;
9454 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9455 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9456 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9457 }
9458
9459 /* Add the base and index registers to the disp. */
9460 switch (bRm & X86_MODRM_RM_MASK)
9461 {
9462 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9463 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9464 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9465 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9466 case 4: u16EffAddr += pCtx->si; break;
9467 case 5: u16EffAddr += pCtx->di; break;
9468 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9469 case 7: u16EffAddr += pCtx->bx; break;
9470 }
9471 }
9472
9473 *pGCPtrEff = u16EffAddr;
9474 }
9475 else
9476 {
9477 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9478 uint32_t u32EffAddr;
9479
9480 /* Handle the disp32 form with no registers first. */
9481 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9482 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9483 else
9484 {
9485 /* Get the register (or SIB) value. */
9486 switch ((bRm & X86_MODRM_RM_MASK))
9487 {
9488 case 0: u32EffAddr = pCtx->eax; break;
9489 case 1: u32EffAddr = pCtx->ecx; break;
9490 case 2: u32EffAddr = pCtx->edx; break;
9491 case 3: u32EffAddr = pCtx->ebx; break;
9492 case 4: /* SIB */
9493 {
9494 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9495
9496 /* Get the index and scale it. */
9497 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9498 {
9499 case 0: u32EffAddr = pCtx->eax; break;
9500 case 1: u32EffAddr = pCtx->ecx; break;
9501 case 2: u32EffAddr = pCtx->edx; break;
9502 case 3: u32EffAddr = pCtx->ebx; break;
9503 case 4: u32EffAddr = 0; /*none */ break;
9504 case 5: u32EffAddr = pCtx->ebp; break;
9505 case 6: u32EffAddr = pCtx->esi; break;
9506 case 7: u32EffAddr = pCtx->edi; break;
9507 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9508 }
9509 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9510
9511 /* add base */
9512 switch (bSib & X86_SIB_BASE_MASK)
9513 {
9514 case 0: u32EffAddr += pCtx->eax; break;
9515 case 1: u32EffAddr += pCtx->ecx; break;
9516 case 2: u32EffAddr += pCtx->edx; break;
9517 case 3: u32EffAddr += pCtx->ebx; break;
9518 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9519 case 5:
9520 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9521 {
9522 u32EffAddr += pCtx->ebp;
9523 SET_SS_DEF();
9524 }
9525 else
9526 {
9527 uint32_t u32Disp;
9528 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9529 u32EffAddr += u32Disp;
9530 }
9531 break;
9532 case 6: u32EffAddr += pCtx->esi; break;
9533 case 7: u32EffAddr += pCtx->edi; break;
9534 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9535 }
9536 break;
9537 }
9538 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9539 case 6: u32EffAddr = pCtx->esi; break;
9540 case 7: u32EffAddr = pCtx->edi; break;
9541 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9542 }
9543
9544 /* Get and add the displacement. */
9545 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9546 {
9547 case 0:
9548 break;
9549 case 1:
9550 {
9551 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9552 u32EffAddr += i8Disp;
9553 break;
9554 }
9555 case 2:
9556 {
9557 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9558 u32EffAddr += u32Disp;
9559 break;
9560 }
9561 default:
9562 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9563 }
9564
9565 }
9566 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9567 *pGCPtrEff = u32EffAddr;
9568 else
9569 {
9570 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9571 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9572 }
9573 }
9574 }
9575 else
9576 {
9577 uint64_t u64EffAddr;
9578
9579 /* Handle the rip+disp32 form with no registers first. */
9580 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9581 {
9582 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9583 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9584 }
9585 else
9586 {
9587 /* Get the register (or SIB) value. */
9588 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9589 {
9590 case 0: u64EffAddr = pCtx->rax; break;
9591 case 1: u64EffAddr = pCtx->rcx; break;
9592 case 2: u64EffAddr = pCtx->rdx; break;
9593 case 3: u64EffAddr = pCtx->rbx; break;
9594 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9595 case 6: u64EffAddr = pCtx->rsi; break;
9596 case 7: u64EffAddr = pCtx->rdi; break;
9597 case 8: u64EffAddr = pCtx->r8; break;
9598 case 9: u64EffAddr = pCtx->r9; break;
9599 case 10: u64EffAddr = pCtx->r10; break;
9600 case 11: u64EffAddr = pCtx->r11; break;
9601 case 13: u64EffAddr = pCtx->r13; break;
9602 case 14: u64EffAddr = pCtx->r14; break;
9603 case 15: u64EffAddr = pCtx->r15; break;
9604 /* SIB */
9605 case 4:
9606 case 12:
9607 {
9608 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9609
9610 /* Get the index and scale it. */
9611 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9612 {
9613 case 0: u64EffAddr = pCtx->rax; break;
9614 case 1: u64EffAddr = pCtx->rcx; break;
9615 case 2: u64EffAddr = pCtx->rdx; break;
9616 case 3: u64EffAddr = pCtx->rbx; break;
9617 case 4: u64EffAddr = 0; /*none */ break;
9618 case 5: u64EffAddr = pCtx->rbp; break;
9619 case 6: u64EffAddr = pCtx->rsi; break;
9620 case 7: u64EffAddr = pCtx->rdi; break;
9621 case 8: u64EffAddr = pCtx->r8; break;
9622 case 9: u64EffAddr = pCtx->r9; break;
9623 case 10: u64EffAddr = pCtx->r10; break;
9624 case 11: u64EffAddr = pCtx->r11; break;
9625 case 12: u64EffAddr = pCtx->r12; break;
9626 case 13: u64EffAddr = pCtx->r13; break;
9627 case 14: u64EffAddr = pCtx->r14; break;
9628 case 15: u64EffAddr = pCtx->r15; break;
9629 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9630 }
9631 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9632
9633 /* add base */
9634 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9635 {
9636 case 0: u64EffAddr += pCtx->rax; break;
9637 case 1: u64EffAddr += pCtx->rcx; break;
9638 case 2: u64EffAddr += pCtx->rdx; break;
9639 case 3: u64EffAddr += pCtx->rbx; break;
9640 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9641 case 6: u64EffAddr += pCtx->rsi; break;
9642 case 7: u64EffAddr += pCtx->rdi; break;
9643 case 8: u64EffAddr += pCtx->r8; break;
9644 case 9: u64EffAddr += pCtx->r9; break;
9645 case 10: u64EffAddr += pCtx->r10; break;
9646 case 11: u64EffAddr += pCtx->r11; break;
9647 case 12: u64EffAddr += pCtx->r12; break;
9648 case 14: u64EffAddr += pCtx->r14; break;
9649 case 15: u64EffAddr += pCtx->r15; break;
9650 /* complicated encodings */
9651 case 5:
9652 case 13:
9653 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9654 {
9655 if (!pIemCpu->uRexB)
9656 {
9657 u64EffAddr += pCtx->rbp;
9658 SET_SS_DEF();
9659 }
9660 else
9661 u64EffAddr += pCtx->r13;
9662 }
9663 else
9664 {
9665 uint32_t u32Disp;
9666 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9667 u64EffAddr += (int32_t)u32Disp;
9668 }
9669 break;
9670 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9671 }
9672 break;
9673 }
9674 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9675 }
9676
9677 /* Get and add the displacement. */
9678 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9679 {
9680 case 0:
9681 break;
9682 case 1:
9683 {
9684 int8_t i8Disp;
9685 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9686 u64EffAddr += i8Disp;
9687 break;
9688 }
9689 case 2:
9690 {
9691 uint32_t u32Disp;
9692 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9693 u64EffAddr += (int32_t)u32Disp;
9694 break;
9695 }
9696 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9697 }
9698
9699 }
9700
9701 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9702 *pGCPtrEff = u64EffAddr;
9703 else
9704 {
9705 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9706 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9707 }
9708 }
9709
9710 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9711 return VINF_SUCCESS;
9712}
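/* Illustrative sketch only: a standalone version of the 16-bit ModR/M effective
   address table used in the first branch above.  Register values and the
   (already fetched) displacement are passed in explicitly, so this helper is not
   used by IEM itself and is kept disabled. */
#if 0
static uint16_t iemExampleCalc16BitEffAddr(uint8_t bRm, uint16_t bx, uint16_t bp,
                                           uint16_t si, uint16_t di, uint16_t u16Disp)
{
    /* mod=00 with rm=110 is the special disp16-only form. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
        return u16Disp;

    uint16_t u16EffAddr = u16Disp; /* zero, sign-extended disp8 or disp16, depending on mod */
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u16EffAddr += bx + si; break;
        case 1: u16EffAddr += bx + di; break;
        case 2: u16EffAddr += bp + si; break;  /* default segment is SS */
        case 3: u16EffAddr += bp + di; break;  /* default segment is SS */
        case 4: u16EffAddr += si;      break;
        case 5: u16EffAddr += di;      break;
        case 6: u16EffAddr += bp;      break;  /* default segment is SS */
        case 7: u16EffAddr += bx;      break;
    }
    return u16EffAddr; /* wraps at 64KiB thanks to the uint16_t type */
}
#endif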
9713
9714/** @} */
9715
9716
9717
9718/*
9719 * Include the instructions
9720 */
9721#include "IEMAllInstructions.cpp.h"
9722
9723
9724
9725
9726#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9727
9728/**
9729 * Sets up execution verification mode.
9730 */
9731IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9732{
9733 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9734 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9735
9736 /*
9737 * Always note down the address of the current instruction.
9738 */
9739 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9740 pIemCpu->uOldRip = pOrgCtx->rip;
9741
9742 /*
9743 * Enable verification and/or logging.
9744 */
9745    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9746 if ( fNewNoRem
9747 && ( 0
9748#if 0 /* auto enable on first paged protected mode interrupt */
9749 || ( pOrgCtx->eflags.Bits.u1IF
9750 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9751 && TRPMHasTrap(pVCpu)
9752 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9753#endif
9754#if 0
9755          || (   pOrgCtx->cs.Sel == 0x10
9756              && (   pOrgCtx->rip == 0x90119e3e
9757                  || pOrgCtx->rip == 0x901d9810))
9758#endif
9759#if 0 /* Auto enable DSL - FPU stuff. */
9760          || (   pOrgCtx->cs.Sel == 0x10
9761 && (// pOrgCtx->rip == 0xc02ec07f
9762 //|| pOrgCtx->rip == 0xc02ec082
9763 //|| pOrgCtx->rip == 0xc02ec0c9
9764 0
9765 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9766#endif
9767#if 0 /* Auto enable DSL - fstp st0 stuff. */
9768          || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9769#endif
9770#if 0
9771 || pOrgCtx->rip == 0x9022bb3a
9772#endif
9773#if 0
9774 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9775#endif
9776#if 0
9777 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9778 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9779#endif
9780#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9781 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9782 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9783 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9784#endif
9785#if 0 /* NT4SP1 - xadd early boot. */
9786 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9787#endif
9788#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9789 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9790#endif
9791#if 0 /* NT4SP1 - cmpxchg (AMD). */
9792 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9793#endif
9794#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9795 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9796#endif
9797#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9798 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9799
9800#endif
9801#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9802 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9803
9804#endif
9805#if 0 /* NT4SP1 - frstor [ecx] */
9806 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9807#endif
9808#if 0 /* xxxxxx - All long mode code. */
9809 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9810#endif
9811#if 0 /* rep movsq linux 3.7 64-bit boot. */
9812 || (pOrgCtx->rip == 0x0000000000100241)
9813#endif
9814#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9815 || (pOrgCtx->rip == 0x000000000215e240)
9816#endif
9817#if 0 /* DOS's size-overridden iret to v8086. */
9818 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9819#endif
9820 )
9821 )
9822 {
9823 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9824 RTLogFlags(NULL, "enabled");
9825 fNewNoRem = false;
9826 }
9827 if (fNewNoRem != pIemCpu->fNoRem)
9828 {
9829 pIemCpu->fNoRem = fNewNoRem;
9830 if (!fNewNoRem)
9831 {
9832 LogAlways(("Enabling verification mode!\n"));
9833 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9834 }
9835 else
9836 LogAlways(("Disabling verification mode!\n"));
9837 }
9838
9839 /*
9840 * Switch state.
9841 */
9842 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9843 {
9844 static CPUMCTX s_DebugCtx; /* Ugly! */
9845
9846 s_DebugCtx = *pOrgCtx;
9847 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9848 }
9849
9850 /*
9851 * See if there is an interrupt pending in TRPM and inject it if we can.
9852 */
9853 pIemCpu->uInjectCpl = UINT8_MAX;
9854 if ( pOrgCtx->eflags.Bits.u1IF
9855 && TRPMHasTrap(pVCpu)
9856 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9857 {
9858 uint8_t u8TrapNo;
9859 TRPMEVENT enmType;
9860 RTGCUINT uErrCode;
9861 RTGCPTR uCr2;
9862 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9863 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9864 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9865 TRPMResetTrap(pVCpu);
9866 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9867 }
9868
9869 /*
9870 * Reset the counters.
9871 */
9872 pIemCpu->cIOReads = 0;
9873 pIemCpu->cIOWrites = 0;
9874 pIemCpu->fIgnoreRaxRdx = false;
9875 pIemCpu->fOverlappingMovs = false;
9876 pIemCpu->fProblematicMemory = false;
9877 pIemCpu->fUndefinedEFlags = 0;
9878
9879 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9880 {
9881 /*
9882 * Free all verification records.
9883 */
9884 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9885 pIemCpu->pIemEvtRecHead = NULL;
9886 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9887 do
9888 {
9889 while (pEvtRec)
9890 {
9891 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9892 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9893 pIemCpu->pFreeEvtRec = pEvtRec;
9894 pEvtRec = pNext;
9895 }
9896 pEvtRec = pIemCpu->pOtherEvtRecHead;
9897 pIemCpu->pOtherEvtRecHead = NULL;
9898 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9899 } while (pEvtRec);
9900 }
9901}
9902
9903
9904/**
9905 * Allocate an event record.
9906 * @returns Pointer to a record.
9907 */
9908IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9909{
9910 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9911 return NULL;
9912
9913 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9914 if (pEvtRec)
9915 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9916 else
9917 {
9918 if (!pIemCpu->ppIemEvtRecNext)
9919 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9920
9921 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9922 if (!pEvtRec)
9923 return NULL;
9924 }
9925 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9926 pEvtRec->pNext = NULL;
9927 return pEvtRec;
9928}
9929
9930
9931/**
9932 * IOMMMIORead notification.
9933 */
9934VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9935{
9936 PVMCPU pVCpu = VMMGetCpu(pVM);
9937 if (!pVCpu)
9938 return;
9939 PIEMCPU pIemCpu = &pVCpu->iem.s;
9940 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9941 if (!pEvtRec)
9942 return;
9943 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9944 pEvtRec->u.RamRead.GCPhys = GCPhys;
9945 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9946 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9947 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9948}
9949
9950
9951/**
9952 * IOMMMIOWrite notification.
9953 */
9954VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9955{
9956 PVMCPU pVCpu = VMMGetCpu(pVM);
9957 if (!pVCpu)
9958 return;
9959 PIEMCPU pIemCpu = &pVCpu->iem.s;
9960 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9961 if (!pEvtRec)
9962 return;
9963 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9964 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9965 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9966 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9967 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9968 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9969 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9970 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9971 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9972}
9973
9974
9975/**
9976 * IOMIOPortRead notification.
9977 */
9978VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9979{
9980 PVMCPU pVCpu = VMMGetCpu(pVM);
9981 if (!pVCpu)
9982 return;
9983 PIEMCPU pIemCpu = &pVCpu->iem.s;
9984 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9985 if (!pEvtRec)
9986 return;
9987 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9988 pEvtRec->u.IOPortRead.Port = Port;
9989 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9990 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9991 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9992}
9993
9994/**
9995 * IOMIOPortWrite notification.
9996 */
9997VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9998{
9999 PVMCPU pVCpu = VMMGetCpu(pVM);
10000 if (!pVCpu)
10001 return;
10002 PIEMCPU pIemCpu = &pVCpu->iem.s;
10003 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10004 if (!pEvtRec)
10005 return;
10006 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10007 pEvtRec->u.IOPortWrite.Port = Port;
10008 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10009 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10010 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10011 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10012}
10013
10014
10015VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10016{
10017 AssertFailed();
10018}
10019
10020
10021VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10022{
10023 AssertFailed();
10024}
10025
10026
10027/**
10028 * Fakes and records an I/O port read.
10029 *
10030 * @returns VINF_SUCCESS.
10031 * @param pIemCpu The IEM per CPU data.
10032 * @param Port The I/O port.
10033 * @param pu32Value Where to store the fake value.
10034 * @param cbValue The size of the access.
10035 */
10036IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10037{
10038 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10039 if (pEvtRec)
10040 {
10041 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10042 pEvtRec->u.IOPortRead.Port = Port;
10043 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
10044 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10045 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10046 }
10047 pIemCpu->cIOReads++;
10048 *pu32Value = 0xcccccccc;
10049 return VINF_SUCCESS;
10050}
10051
10052
10053/**
10054 * Fakes and records an I/O port write.
10055 *
10056 * @returns VINF_SUCCESS.
10057 * @param pIemCpu The IEM per CPU data.
10058 * @param Port The I/O port.
10059 * @param u32Value The value being written.
10060 * @param cbValue The size of the access.
10061 */
10062IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10063{
10064 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10065 if (pEvtRec)
10066 {
10067 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10068 pEvtRec->u.IOPortWrite.Port = Port;
10069 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10070 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10071 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10072 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10073 }
10074 pIemCpu->cIOWrites++;
10075 return VINF_SUCCESS;
10076}
10077
10078
10079/**
10080 * Used to add extra details about a stub case.
10081 * @param pIemCpu The IEM per CPU state.
10082 */
10083IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10084{
10085 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10086 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10087 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10088 char szRegs[4096];
10089 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10090 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10091 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10092 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10093 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10094 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10095 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10096 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10097 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10098 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10099 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10100 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10101 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10102 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10103 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10104 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10105 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10106 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10107 " efer=%016VR{efer}\n"
10108 " pat=%016VR{pat}\n"
10109 " sf_mask=%016VR{sf_mask}\n"
10110 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10111 " lstar=%016VR{lstar}\n"
10112 " star=%016VR{star} cstar=%016VR{cstar}\n"
10113 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10114 );
10115
10116 char szInstr1[256];
10117 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10118 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10119 szInstr1, sizeof(szInstr1), NULL);
10120 char szInstr2[256];
10121 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10122 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10123 szInstr2, sizeof(szInstr2), NULL);
10124
10125 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10126}
10127
10128
10129/**
10130 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10131 * dump to the assertion info.
10132 *
10133 * @param pEvtRec The record to dump.
10134 */
10135IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10136{
10137 switch (pEvtRec->enmEvent)
10138 {
10139 case IEMVERIFYEVENT_IOPORT_READ:
10140 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10141                         pEvtRec->u.IOPortRead.Port,
10142                         pEvtRec->u.IOPortRead.cbValue);
10143 break;
10144 case IEMVERIFYEVENT_IOPORT_WRITE:
10145 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10146 pEvtRec->u.IOPortWrite.Port,
10147 pEvtRec->u.IOPortWrite.cbValue,
10148 pEvtRec->u.IOPortWrite.u32Value);
10149 break;
10150 case IEMVERIFYEVENT_RAM_READ:
10151 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10152 pEvtRec->u.RamRead.GCPhys,
10153 pEvtRec->u.RamRead.cb);
10154 break;
10155 case IEMVERIFYEVENT_RAM_WRITE:
10156 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10157 pEvtRec->u.RamWrite.GCPhys,
10158 pEvtRec->u.RamWrite.cb,
10159 (int)pEvtRec->u.RamWrite.cb,
10160 pEvtRec->u.RamWrite.ab);
10161 break;
10162 default:
10163 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10164 break;
10165 }
10166}
10167
10168
10169/**
10170 * Raises an assertion on the specified records, showing the given message with
10171 * dumps of both records attached.
10172 *
10173 * @param pIemCpu The IEM per CPU data.
10174 * @param pEvtRec1 The first record.
10175 * @param pEvtRec2 The second record.
10176 * @param pszMsg The message explaining why we're asserting.
10177 */
10178IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10179{
10180 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10181 iemVerifyAssertAddRecordDump(pEvtRec1);
10182 iemVerifyAssertAddRecordDump(pEvtRec2);
10183 iemVerifyAssertMsg2(pIemCpu);
10184 RTAssertPanic();
10185}
10186
10187
10188/**
10189 * Raises an assertion on the specified record, showing the given message with
10190 * a record dump attached.
10191 *
10192 * @param pIemCpu The IEM per CPU data.
10193 * @param   pEvtRec             The record.
10194 * @param pszMsg The message explaining why we're asserting.
10195 */
10196IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10197{
10198 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10199 iemVerifyAssertAddRecordDump(pEvtRec);
10200 iemVerifyAssertMsg2(pIemCpu);
10201 RTAssertPanic();
10202}
10203
10204
10205/**
10206 * Verifies a write record.
10207 *
10208 * @param pIemCpu The IEM per CPU data.
10209 * @param pEvtRec The write record.
10210 * @param   fRem                Set if REM was doing the other execution. If clear,
10211 *                              it was HM.
10212 */
10213IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10214{
10215 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10216 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10217 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10218 if ( RT_FAILURE(rc)
10219 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10220 {
10221 /* fend off ins */
10222 if ( !pIemCpu->cIOReads
10223 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10224 || ( pEvtRec->u.RamWrite.cb != 1
10225 && pEvtRec->u.RamWrite.cb != 2
10226 && pEvtRec->u.RamWrite.cb != 4) )
10227 {
10228 /* fend off ROMs and MMIO */
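            /* Note: GCPhys - base > size is a compact unsigned range check: it is
               false exactly when GCPhys lies in the 0xa0000-0x100000 VGA/BIOS
               window resp. the 0xfffc0000-0x100000000 ROM alias at the top of
               4GB, so writes to those regions are not compared. */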
10229 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10230 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10231 {
10232 /* fend off fxsave */
10233 if (pEvtRec->u.RamWrite.cb != 512)
10234 {
10235 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10236 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10237                    RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
10238 RTAssertMsg2Add("%s: %.*Rhxs\n"
10239 "iem: %.*Rhxs\n",
10240 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10241 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10242 iemVerifyAssertAddRecordDump(pEvtRec);
10243 iemVerifyAssertMsg2(pIemCpu);
10244 RTAssertPanic();
10245 }
10246 }
10247 }
10248 }
10249
10250}
10251
10252/**
10253 * Performs the post-execution verification checks.
10254 */
10255IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10256{
10257 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10258 return;
10259
10260 /*
10261 * Switch back the state.
10262 */
10263 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10264 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10265 Assert(pOrgCtx != pDebugCtx);
10266 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10267
10268 /*
10269 * Execute the instruction in REM.
10270 */
10271 bool fRem = false;
10272 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10273 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10274 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10275#ifdef IEM_VERIFICATION_MODE_FULL_HM
10276 if ( HMIsEnabled(pVM)
10277 && pIemCpu->cIOReads == 0
10278 && pIemCpu->cIOWrites == 0
10279 && !pIemCpu->fProblematicMemory)
10280 {
10281 uint64_t uStartRip = pOrgCtx->rip;
10282 unsigned iLoops = 0;
10283 do
10284 {
10285 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10286 iLoops++;
10287 } while ( rc == VINF_SUCCESS
10288 || ( rc == VINF_EM_DBG_STEPPED
10289 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10290 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10291 || ( pOrgCtx->rip != pDebugCtx->rip
10292 && pIemCpu->uInjectCpl != UINT8_MAX
10293 && iLoops < 8) );
10294 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10295 rc = VINF_SUCCESS;
10296 }
10297#endif
10298 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10299 || rc == VINF_IOM_R3_IOPORT_READ
10300 || rc == VINF_IOM_R3_IOPORT_WRITE
10301 || rc == VINF_IOM_R3_MMIO_READ
10302 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10303 || rc == VINF_IOM_R3_MMIO_WRITE
10304 || rc == VINF_CPUM_R3_MSR_READ
10305 || rc == VINF_CPUM_R3_MSR_WRITE
10306 || rc == VINF_EM_RESCHEDULE
10307 )
10308 {
10309 EMRemLock(pVM);
10310 rc = REMR3EmulateInstruction(pVM, pVCpu);
10311 AssertRC(rc);
10312 EMRemUnlock(pVM);
10313 fRem = true;
10314 }
10315
10316 /*
10317 * Compare the register states.
10318 */
10319 unsigned cDiffs = 0;
10320 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10321 {
10322 //Log(("REM and IEM ends up with different registers!\n"));
10323 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10324
10325# define CHECK_FIELD(a_Field) \
10326 do \
10327 { \
10328 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10329 { \
10330 switch (sizeof(pOrgCtx->a_Field)) \
10331 { \
10332 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10333 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10334 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10335 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10336 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10337 } \
10338 cDiffs++; \
10339 } \
10340 } while (0)
10341# define CHECK_XSTATE_FIELD(a_Field) \
10342 do \
10343 { \
10344 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10345 { \
10346 switch (sizeof(pOrgXState->a_Field)) \
10347 { \
10348 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10349 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10350 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10351 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10352 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10353 } \
10354 cDiffs++; \
10355 } \
10356 } while (0)
10357
10358# define CHECK_BIT_FIELD(a_Field) \
10359 do \
10360 { \
10361 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10362 { \
10363 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10364 cDiffs++; \
10365 } \
10366 } while (0)
10367
10368# define CHECK_SEL(a_Sel) \
10369 do \
10370 { \
10371 CHECK_FIELD(a_Sel.Sel); \
10372 CHECK_FIELD(a_Sel.Attr.u); \
10373 CHECK_FIELD(a_Sel.u64Base); \
10374 CHECK_FIELD(a_Sel.u32Limit); \
10375 CHECK_FIELD(a_Sel.fFlags); \
10376 } while (0)
10377
10378 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10379 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10380
10381#if 1 /* The recompiler doesn't update these the intel way. */
10382 if (fRem)
10383 {
10384 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10385 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10386 pOrgXState->x87.CS = pDebugXState->x87.CS;
10387 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10388 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10389 pOrgXState->x87.DS = pDebugXState->x87.DS;
10390 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10391 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10392 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10393 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10394 }
10395#endif
10396 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10397 {
10398 RTAssertMsg2Weak(" the FPU state differs\n");
10399 cDiffs++;
10400 CHECK_XSTATE_FIELD(x87.FCW);
10401 CHECK_XSTATE_FIELD(x87.FSW);
10402 CHECK_XSTATE_FIELD(x87.FTW);
10403 CHECK_XSTATE_FIELD(x87.FOP);
10404 CHECK_XSTATE_FIELD(x87.FPUIP);
10405 CHECK_XSTATE_FIELD(x87.CS);
10406 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10407 CHECK_XSTATE_FIELD(x87.FPUDP);
10408 CHECK_XSTATE_FIELD(x87.DS);
10409 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10410 CHECK_XSTATE_FIELD(x87.MXCSR);
10411 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10412 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10413 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10414 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10415 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10416 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10417 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10418 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10419 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10420 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10421 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10422 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10423 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10424 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10425 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10426 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10427 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10428 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10429 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10430 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10431 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10432 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10433 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10434 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10435 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10436 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10437 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10438 }
10439 CHECK_FIELD(rip);
10440 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10441 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10442 {
10443 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10444 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10445 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10446 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10447 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10448 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10449 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10450 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10451 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10452 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10453 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10454 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10455 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10456 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10457 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10458 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10459        if (0 && !fRem) /** @todo debug the occasional clearing of the RF flag when running against VT-x. */
10460 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10461 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10462 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10463 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10464 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10465 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10466 }
10467
10468 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10469 CHECK_FIELD(rax);
10470 CHECK_FIELD(rcx);
10471 if (!pIemCpu->fIgnoreRaxRdx)
10472 CHECK_FIELD(rdx);
10473 CHECK_FIELD(rbx);
10474 CHECK_FIELD(rsp);
10475 CHECK_FIELD(rbp);
10476 CHECK_FIELD(rsi);
10477 CHECK_FIELD(rdi);
10478 CHECK_FIELD(r8);
10479 CHECK_FIELD(r9);
10480 CHECK_FIELD(r10);
10481 CHECK_FIELD(r11);
10482 CHECK_FIELD(r12);
10483 CHECK_FIELD(r13);
10484 CHECK_SEL(cs);
10485 CHECK_SEL(ss);
10486 CHECK_SEL(ds);
10487 CHECK_SEL(es);
10488 CHECK_SEL(fs);
10489 CHECK_SEL(gs);
10490 CHECK_FIELD(cr0);
10491
10492    /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10493       the faulting instruction first: 001b:77f61ff3 66 8b 42 02   mov ax, word [edx+002h] (NT4SP1) */
10494    /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
10495       while REM reports the address of the first byte on the page.  Pending investigation as to which is correct. */
10496 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10497 {
10498 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10499 { /* ignore */ }
10500 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10501 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10502 && fRem)
10503 { /* ignore */ }
10504 else
10505 CHECK_FIELD(cr2);
10506 }
10507 CHECK_FIELD(cr3);
10508 CHECK_FIELD(cr4);
10509 CHECK_FIELD(dr[0]);
10510 CHECK_FIELD(dr[1]);
10511 CHECK_FIELD(dr[2]);
10512 CHECK_FIELD(dr[3]);
10513 CHECK_FIELD(dr[6]);
10514 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10515 CHECK_FIELD(dr[7]);
10516 CHECK_FIELD(gdtr.cbGdt);
10517 CHECK_FIELD(gdtr.pGdt);
10518 CHECK_FIELD(idtr.cbIdt);
10519 CHECK_FIELD(idtr.pIdt);
10520 CHECK_SEL(ldtr);
10521 CHECK_SEL(tr);
10522 CHECK_FIELD(SysEnter.cs);
10523 CHECK_FIELD(SysEnter.eip);
10524 CHECK_FIELD(SysEnter.esp);
10525 CHECK_FIELD(msrEFER);
10526 CHECK_FIELD(msrSTAR);
10527 CHECK_FIELD(msrPAT);
10528 CHECK_FIELD(msrLSTAR);
10529 CHECK_FIELD(msrCSTAR);
10530 CHECK_FIELD(msrSFMASK);
10531 CHECK_FIELD(msrKERNELGSBASE);
10532
10533 if (cDiffs != 0)
10534 {
10535 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10536 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10537 iemVerifyAssertMsg2(pIemCpu);
10538 RTAssertPanic();
10539 }
10540# undef CHECK_FIELD
10541# undef CHECK_BIT_FIELD
10542 }
10543
10544 /*
10545 * If the register state compared fine, check the verification event
10546 * records.
10547 */
10548 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10549 {
10550 /*
10551         * Compare verification event records.
10552 * - I/O port accesses should be a 1:1 match.
10553 */
10554 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10555 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10556 while (pIemRec && pOtherRec)
10557 {
10558            /* Since we might miss RAM writes and reads, ignore reads and verify
10559               that any extra written-memory records match the actual guest RAM. */
10560 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10561 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10562 && pIemRec->pNext)
10563 {
10564 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10565 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10566 pIemRec = pIemRec->pNext;
10567 }
10568
10569 /* Do the compare. */
10570 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10571 {
10572 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10573 break;
10574 }
10575 bool fEquals;
10576 switch (pIemRec->enmEvent)
10577 {
10578 case IEMVERIFYEVENT_IOPORT_READ:
10579 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10580 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10581 break;
10582 case IEMVERIFYEVENT_IOPORT_WRITE:
10583 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10584 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10585 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10586 break;
10587 case IEMVERIFYEVENT_RAM_READ:
10588 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10589 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10590 break;
10591 case IEMVERIFYEVENT_RAM_WRITE:
10592 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10593 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10594 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10595 break;
10596 default:
10597 fEquals = false;
10598 break;
10599 }
10600 if (!fEquals)
10601 {
10602 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10603 break;
10604 }
10605
10606 /* advance */
10607 pIemRec = pIemRec->pNext;
10608 pOtherRec = pOtherRec->pNext;
10609 }
10610
10611 /* Ignore extra writes and reads. */
10612 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10613 {
10614 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10615 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10616 pIemRec = pIemRec->pNext;
10617 }
10618 if (pIemRec != NULL)
10619 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10620 else if (pOtherRec != NULL)
10621 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10622 }
10623 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10624}
10625
10626#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10627
10628/* stubs */
10629IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10630{
10631 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10632 return VERR_INTERNAL_ERROR;
10633}
10634
10635IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10636{
10637 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10638 return VERR_INTERNAL_ERROR;
10639}
10640
10641#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10642
10643
10644#ifdef LOG_ENABLED
10645/**
10646 * Logs the current instruction.
10647 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10648 * @param pCtx The current CPU context.
10649 * @param fSameCtx Set if we have the same context information as the VMM,
10650 * clear if we may have already executed an instruction in
10651 * our debug context. When clear, we assume IEMCPU holds
10652 * valid CPU mode info.
10653 */
10654IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10655{
10656# ifdef IN_RING3
10657 if (LogIs2Enabled())
10658 {
10659 char szInstr[256];
10660 uint32_t cbInstr = 0;
10661 if (fSameCtx)
10662 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10663 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10664 szInstr, sizeof(szInstr), &cbInstr);
10665 else
10666 {
10667 uint32_t fFlags = 0;
10668 switch (pVCpu->iem.s.enmCpuMode)
10669 {
10670 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10671 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10672 case IEMMODE_16BIT:
10673 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10674 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10675 else
10676 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10677 break;
10678 }
10679 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10680 szInstr, sizeof(szInstr), &cbInstr);
10681 }
10682
10683 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10684 Log2(("****\n"
10685 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10686 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10687 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10688 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10689 " %s\n"
10690 ,
10691 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10692 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10693 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10694 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10695 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10696 szInstr));
10697
10698 if (LogIs3Enabled())
10699 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10700 }
10701 else
10702# endif
10703 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10704 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10705}
10706#endif
10707
10708
10709/**
10710 * Makes status code adjustments (pass up from I/O and access handlers)
10711 * as well as maintaining statistics.
10712 *
10713 * @returns Strict VBox status code to pass up.
10714 * @param pIemCpu The IEM per CPU data.
10715 * @param rcStrict The status from executing an instruction.
10716 */
10717DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10718{
10719 if (rcStrict != VINF_SUCCESS)
10720 {
10721 if (RT_SUCCESS(rcStrict))
10722 {
10723 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10724 || rcStrict == VINF_IOM_R3_IOPORT_READ
10725 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10726 || rcStrict == VINF_IOM_R3_MMIO_READ
10727 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10728 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10729 || rcStrict == VINF_CPUM_R3_MSR_READ
10730 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10731 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10732 || rcStrict == VINF_EM_RAW_TO_R3
10733 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10734 /* raw-mode / virt handlers only: */
10735 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10736 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10737 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10738 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10739 || rcStrict == VINF_SELM_SYNC_GDT
10740 || rcStrict == VINF_CSAM_PENDING_ACTION
10741 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10742 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10743/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10744 int32_t const rcPassUp = pIemCpu->rcPassUp;
10745 if (rcPassUp == VINF_SUCCESS)
10746 pIemCpu->cRetInfStatuses++;
10747 else if ( rcPassUp < VINF_EM_FIRST
10748 || rcPassUp > VINF_EM_LAST
10749 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10750 {
10751 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10752 pIemCpu->cRetPassUpStatus++;
10753 rcStrict = rcPassUp;
10754 }
10755 else
10756 {
10757 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10758 pIemCpu->cRetInfStatuses++;
10759 }
10760 }
10761 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10762 pIemCpu->cRetAspectNotImplemented++;
10763 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10764 pIemCpu->cRetInstrNotImplemented++;
10765#ifdef IEM_VERIFICATION_MODE_FULL
10766 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10767 rcStrict = VINF_SUCCESS;
10768#endif
10769 else
10770 pIemCpu->cRetErrStatuses++;
10771 }
10772 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10773 {
10774 pIemCpu->cRetPassUpStatus++;
10775 rcStrict = pIemCpu->rcPassUp;
10776 }
10777
10778 return rcStrict;
10779}
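
/*
 * Illustrative note (not part of the build): the precedence implemented above
 * means a pass-up status recorded earlier in the instruction (pIemCpu->rcPassUp)
 * replaces an informational rcStrict whenever it lies outside the
 * VINF_EM_FIRST..VINF_EM_LAST scheduling range or is numerically lower (lower
 * VINF_EM_* values being the more urgent ones); otherwise the instruction's own
 * status stands and only the statistics are updated.
 */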
10780
10781
10782/**
10783 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10784 * IEMExecOneWithPrefetchedByPC.
10785 *
10786 * @return Strict VBox status code.
10787 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10788 * @param pIemCpu The IEM per CPU data.
10789 * @param fExecuteInhibit If set, execute the instruction following CLI,
10790 * POP SS and MOV SS,GR.
10791 */
10792DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10793{
10794 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10795 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10796 if (rcStrict == VINF_SUCCESS)
10797 pIemCpu->cInstructions++;
10798 if (pIemCpu->cActiveMappings > 0)
10799 iemMemRollback(pIemCpu);
10800//#ifdef DEBUG
10801// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10802//#endif
10803
10804 /* Execute the next instruction as well if a cli, pop ss or
10805 mov ss, Gr has just completed successfully. */
10806 if ( fExecuteInhibit
10807 && rcStrict == VINF_SUCCESS
10808 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10809 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10810 {
10811 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10812 if (rcStrict == VINF_SUCCESS)
10813 {
10814# ifdef LOG_ENABLED
10815 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10816# endif
10817 IEM_OPCODE_GET_NEXT_U8(&b);
10818 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10819 if (rcStrict == VINF_SUCCESS)
10820 pIemCpu->cInstructions++;
10821 if (pIemCpu->cActiveMappings > 0)
10822 iemMemRollback(pIemCpu);
10823 }
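        /* Presumably a deliberately bogus PC: pointing the inhibit-PC at an
           address RIP can never match effectively clears the interrupt
           inhibition set up by the previous instruction. */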
10824 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10825 }
10826
10827 /*
10828 * Return value fiddling, statistics and sanity assertions.
10829 */
10830 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10831
10832 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10833 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10834#if defined(IEM_VERIFICATION_MODE_FULL)
10835 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10836 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10837 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10839#endif
10840 return rcStrict;
10841}
10842
10843
10844#ifdef IN_RC
10845/**
10846 * Re-enters raw-mode or ensures we return to ring-3.
10847 *
10848 * @returns rcStrict, maybe modified.
10849 * @param pIemCpu The IEM CPU structure.
10850 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10851 * @param pCtx The current CPU context.
10852 * @param rcStrict The status code returned by the interpreter.
10853 */
10854DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10855{
10856 if (!pIemCpu->fInPatchCode)
10857 CPUMRawEnter(pVCpu);
10858 return rcStrict;
10859}
10860#endif
10861
10862
10863/**
10864 * Execute one instruction.
10865 *
10866 * @return Strict VBox status code.
10867 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10868 */
10869VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10870{
10871 PIEMCPU pIemCpu = &pVCpu->iem.s;
10872
10873#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10874 iemExecVerificationModeSetup(pIemCpu);
10875#endif
10876#ifdef LOG_ENABLED
10877 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10878 iemLogCurInstr(pVCpu, pCtx, true);
10879#endif
10880
10881 /*
10882 * Do the decoding and emulation.
10883 */
10884 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10885 if (rcStrict == VINF_SUCCESS)
10886 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10887
10888#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10889 /*
10890 * Assert some sanity.
10891 */
10892 iemExecVerificationModeCheck(pIemCpu);
10893#endif
10894#ifdef IN_RC
10895 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10896#endif
10897 if (rcStrict != VINF_SUCCESS)
10898 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10899 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10900 return rcStrict;
10901}
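
/*
 * Caller sketch (hypothetical, not part of the build): a minimal execution loop
 * in the style of an EM interpreter fallback could drive the API above roughly
 * like this -- the loop bound and the surrounding function are made up for the
 * example:
 *
 *      for (unsigned cInstrs = 0; cInstrs < 32; cInstrs++)
 *      {
 *          VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;    // pass scheduling / I/O statuses up to EM
 *      }
 */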
10902
10903
10904VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10905{
10906 PIEMCPU pIemCpu = &pVCpu->iem.s;
10907 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10908 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10909
10910 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10911 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10912 if (rcStrict == VINF_SUCCESS)
10913 {
10914 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10915 if (pcbWritten)
10916 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10917 }
10918
10919#ifdef IN_RC
10920 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10921#endif
10922 return rcStrict;
10923}
10924
10925
10926VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10927 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10928{
10929 PIEMCPU pIemCpu = &pVCpu->iem.s;
10930 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10931 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10932
10933 VBOXSTRICTRC rcStrict;
10934 if ( cbOpcodeBytes
10935 && pCtx->rip == OpcodeBytesPC)
10936 {
10937 iemInitDecoder(pIemCpu, false);
10938 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10939 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10940 rcStrict = VINF_SUCCESS;
10941 }
10942 else
10943 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10944 if (rcStrict == VINF_SUCCESS)
10945 {
10946 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10947 }
10948
10949#ifdef IN_RC
10950 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10951#endif
10952 return rcStrict;
10953}
10954
10955
10956VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10957{
10958 PIEMCPU pIemCpu = &pVCpu->iem.s;
10959 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10960 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10961
10962 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10963 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10964 if (rcStrict == VINF_SUCCESS)
10965 {
10966 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10967 if (pcbWritten)
10968 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10969 }
10970
10971#ifdef IN_RC
10972 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10973#endif
10974 return rcStrict;
10975}
10976
10977
10978VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10979 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10980{
10981 PIEMCPU pIemCpu = &pVCpu->iem.s;
10982 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10983 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10984
10985 VBOXSTRICTRC rcStrict;
10986 if ( cbOpcodeBytes
10987 && pCtx->rip == OpcodeBytesPC)
10988 {
10989 iemInitDecoder(pIemCpu, true);
10990 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10991 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10992 rcStrict = VINF_SUCCESS;
10993 }
10994 else
10995 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10996 if (rcStrict == VINF_SUCCESS)
10997 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10998
10999#ifdef IN_RC
11000 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11001#endif
11002 return rcStrict;
11003}
11004
11005
11006VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11007{
11008 PIEMCPU pIemCpu = &pVCpu->iem.s;
11009
11010 /*
11011 * See if there is an interrupt pending in TRPM and inject it if we can.
11012 */
11013#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11014 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11015# ifdef IEM_VERIFICATION_MODE_FULL
11016 pIemCpu->uInjectCpl = UINT8_MAX;
11017# endif
11018 if ( pCtx->eflags.Bits.u1IF
11019 && TRPMHasTrap(pVCpu)
11020 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11021 {
11022 uint8_t u8TrapNo;
11023 TRPMEVENT enmType;
11024 RTGCUINT uErrCode;
11025 RTGCPTR uCr2;
11026 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11027 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11028 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11029 TRPMResetTrap(pVCpu);
11030 }
11031#else
11032 iemExecVerificationModeSetup(pIemCpu);
11033 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11034#endif
11035
11036 /*
11037 * Log the state.
11038 */
11039#ifdef LOG_ENABLED
11040 iemLogCurInstr(pVCpu, pCtx, true);
11041#endif
11042
11043 /*
11044 * Do the decoding and emulation.
11045 */
11046 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11047 if (rcStrict == VINF_SUCCESS)
11048 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11049
11050#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11051 /*
11052 * Assert some sanity.
11053 */
11054 iemExecVerificationModeCheck(pIemCpu);
11055#endif
11056
11057 /*
11058 * Maybe re-enter raw-mode and log.
11059 */
11060#ifdef IN_RC
11061 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11062#endif
11063 if (rcStrict != VINF_SUCCESS)
11064 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11065 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11066 return rcStrict;
11067}
11068
11069
11070
11071/**
11072 * Injects a trap, fault, abort, software interrupt or external interrupt.
11073 *
11074 * The parameter list matches TRPMQueryTrapAll pretty closely.
11075 *
11076 * @returns Strict VBox status code.
11077 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11078 * @param u8TrapNo The trap number.
11079 * @param enmType What type is it (trap/fault/abort), software
11080 * interrupt or hardware interrupt.
11081 * @param uErrCode The error code if applicable.
11082 * @param uCr2 The CR2 value if applicable.
11083 * @param cbInstr The instruction length (only relevant for
11084 * software interrupts).
11085 */
11086VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11087 uint8_t cbInstr)
11088{
11089 iemInitDecoder(&pVCpu->iem.s, false);
11090#ifdef DBGFTRACE_ENABLED
11091 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11092 u8TrapNo, enmType, uErrCode, uCr2);
11093#endif
11094
11095 uint32_t fFlags;
11096 switch (enmType)
11097 {
11098 case TRPM_HARDWARE_INT:
11099 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11100 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11101 uErrCode = uCr2 = 0;
11102 break;
11103
11104 case TRPM_SOFTWARE_INT:
11105 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11106 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11107 uErrCode = uCr2 = 0;
11108 break;
11109
11110 case TRPM_TRAP:
11111 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11112 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11113 if (u8TrapNo == X86_XCPT_PF)
11114 fFlags |= IEM_XCPT_FLAGS_CR2;
11115 switch (u8TrapNo)
11116 {
11117 case X86_XCPT_DF:
11118 case X86_XCPT_TS:
11119 case X86_XCPT_NP:
11120 case X86_XCPT_SS:
11121 case X86_XCPT_PF:
11122 case X86_XCPT_AC:
11123 fFlags |= IEM_XCPT_FLAGS_ERR;
11124 break;
11125
11126 case X86_XCPT_NMI:
11127 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11128 break;
11129 }
11130 break;
11131
11132 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11133 }
11134
11135 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11136}
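
/*
 * Usage sketch (hypothetical, not part of the build): forwarding an external
 * interrupt, say vector 0x30 picked up from the virtual interrupt controller,
 * would boil down to the call below.  The error code and CR2 arguments are
 * ignored for TRPM_HARDWARE_INT, and cbInstr only matters for software
 * interrupts:
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x30, TRPM_HARDWARE_INT,
 *                                            0, 0, 0);
 */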
11137
11138
11139/**
11140 * Injects the active TRPM event.
11141 *
11142 * @returns Strict VBox status code.
11143 * @param pVCpu The cross context virtual CPU structure.
11144 */
11145VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11146{
11147#ifndef IEM_IMPLEMENTS_TASKSWITCH
11148 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11149#else
11150 uint8_t u8TrapNo;
11151 TRPMEVENT enmType;
11152 RTGCUINT uErrCode;
11153 RTGCUINTPTR uCr2;
11154 uint8_t cbInstr;
11155 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11156 if (RT_FAILURE(rc))
11157 return rc;
11158
11159 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11160
11161 /** @todo Are there any other codes that imply the event was successfully
11162 * delivered to the guest? See @bugref{6607}. */
11163 if ( rcStrict == VINF_SUCCESS
11164 || rcStrict == VINF_IEM_RAISED_XCPT)
11165 {
11166 TRPMResetTrap(pVCpu);
11167 }
11168 return rcStrict;
11169#endif
11170}
11171
11172
11173VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11174{
11175 return VERR_NOT_IMPLEMENTED;
11176}
11177
11178
11179VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11180{
11181 return VERR_NOT_IMPLEMENTED;
11182}
11183
11184
11185#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11186/**
11187 * Executes an IRET instruction with the default operand size.
11188 *
11189 * This is for PATM.
11190 *
11191 * @returns VBox status code.
11192 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11193 * @param pCtxCore The register frame.
11194 */
11195VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11196{
11197 PIEMCPU pIemCpu = &pVCpu->iem.s;
11198 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11199
11200 iemCtxCoreToCtx(pCtx, pCtxCore);
11201 iemInitDecoder(pIemCpu);
11202 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11203 if (rcStrict == VINF_SUCCESS)
11204 iemCtxToCtxCore(pCtxCore, pCtx);
11205 else
11206 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11207 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11208 return rcStrict;
11209}
11210#endif
11211
11212
11213/**
11214 * Macro used by the IEMExec* method to check the given instruction length.
11215 *
11216 * Will return on failure!
11217 *
11218 * @param a_cbInstr The given instruction length.
11219 * @param a_cbMin The minimum length.
11220 */
11221#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11222 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11223 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
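
/*
 * Note that the single unsigned comparison above folds both bounds into one
 * check: a length below a_cbMin wraps around to a huge value.  For example,
 * with a_cbMin = 2 the test reads (cbInstr - 2) <= 13, which only holds for
 * 2 <= cbInstr <= 15 (the architectural maximum instruction length).
 */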
11224
11225
11226/**
11227 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11228 *
11229 * This API ASSUMES that the caller has already verified that the guest code is
11230 * allowed to access the I/O port. (The I/O port is in the DX register in the
11231 * guest state.)
11232 *
11233 * @returns Strict VBox status code.
11234 * @param pVCpu The cross context virtual CPU structure.
11235 * @param cbValue The size of the I/O port access (1, 2, or 4).
11236 * @param enmAddrMode The addressing mode.
11237 * @param fRepPrefix Indicates whether a repeat prefix is used
11238 * (doesn't matter which for this instruction).
11239 * @param cbInstr The instruction length in bytes.
11240 * @param iEffSeg The effective segment register number.
11241 */
11242VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11243 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11244{
11245 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11246 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11247
11248 /*
11249 * State init.
11250 */
11251 PIEMCPU pIemCpu = &pVCpu->iem.s;
11252 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11253
11254 /*
11255 * Switch orgy for getting to the right handler.
11256 */
11257 VBOXSTRICTRC rcStrict;
11258 if (fRepPrefix)
11259 {
11260 switch (enmAddrMode)
11261 {
11262 case IEMMODE_16BIT:
11263 switch (cbValue)
11264 {
11265 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11266 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11267 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11268 default:
11269 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11270 }
11271 break;
11272
11273 case IEMMODE_32BIT:
11274 switch (cbValue)
11275 {
11276 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11277 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11278 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11279 default:
11280 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11281 }
11282 break;
11283
11284 case IEMMODE_64BIT:
11285 switch (cbValue)
11286 {
11287 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11288 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11289 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11290 default:
11291 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11292 }
11293 break;
11294
11295 default:
11296 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11297 }
11298 }
11299 else
11300 {
11301 switch (enmAddrMode)
11302 {
11303 case IEMMODE_16BIT:
11304 switch (cbValue)
11305 {
11306 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11307 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11308 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11309 default:
11310 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11311 }
11312 break;
11313
11314 case IEMMODE_32BIT:
11315 switch (cbValue)
11316 {
11317 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11318 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11319 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11320 default:
11321 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11322 }
11323 break;
11324
11325 case IEMMODE_64BIT:
11326 switch (cbValue)
11327 {
11328 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11329 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11330 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11331 default:
11332 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11333 }
11334 break;
11335
11336 default:
11337 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11338 }
11339 }
11340
11341 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11342}
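
/*
 * Caller sketch (hypothetical, not part of the build): after a hardware
 * virtualization exit for a "rep outsb" that has already passed the I/O
 * permission checks, the call into the API above could look roughly like this;
 * cbInstr is assumed to come from the exit information, and the analogous
 * IEMExecStringIoRead() below is used the same way for INS:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
 *                                                   1,              // cbValue: byte access
 *                                                   IEMMODE_32BIT,  // enmAddrMode
 *                                                   true,           // fRepPrefix
 *                                                   cbInstr,
 *                                                   X86_SREG_DS);   // iEffSeg
 */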
11343
11344
11345/**
11346 * Interface for HM and EM for executing string I/O IN (read) instructions.
11347 *
11348 * This API ASSUMES that the caller has already verified that the guest code is
11349 * allowed to access the I/O port. (The I/O port is in the DX register in the
11350 * guest state.)
11351 *
11352 * @returns Strict VBox status code.
11353 * @param pVCpu The cross context virtual CPU structure.
11354 * @param cbValue The size of the I/O port access (1, 2, or 4).
11355 * @param enmAddrMode The addressing mode.
11356 * @param fRepPrefix Indicates whether a repeat prefix is used
11357 * (doesn't matter which for this instruction).
11358 * @param cbInstr The instruction length in bytes.
11359 */
11360VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11361 bool fRepPrefix, uint8_t cbInstr)
11362{
11363 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11364
11365 /*
11366 * State init.
11367 */
11368 PIEMCPU pIemCpu = &pVCpu->iem.s;
11369 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11370
11371 /*
11372 * Switch orgy for getting to the right handler.
11373 */
11374 VBOXSTRICTRC rcStrict;
11375 if (fRepPrefix)
11376 {
11377 switch (enmAddrMode)
11378 {
11379 case IEMMODE_16BIT:
11380 switch (cbValue)
11381 {
11382 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11383 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11384 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11385 default:
11386 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11387 }
11388 break;
11389
11390 case IEMMODE_32BIT:
11391 switch (cbValue)
11392 {
11393 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11394 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11395 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11396 default:
11397 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11398 }
11399 break;
11400
11401 case IEMMODE_64BIT:
11402 switch (cbValue)
11403 {
11404 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11405 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11406 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11407 default:
11408 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11409 }
11410 break;
11411
11412 default:
11413 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11414 }
11415 }
11416 else
11417 {
11418 switch (enmAddrMode)
11419 {
11420 case IEMMODE_16BIT:
11421 switch (cbValue)
11422 {
11423 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11424 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11425 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11426 default:
11427 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11428 }
11429 break;
11430
11431 case IEMMODE_32BIT:
11432 switch (cbValue)
11433 {
11434 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11435 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11436 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11437 default:
11438 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11439 }
11440 break;
11441
11442 case IEMMODE_64BIT:
11443 switch (cbValue)
11444 {
11445 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11446 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11447 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11448 default:
11449 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11450 }
11451 break;
11452
11453 default:
11454 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11455 }
11456 }
11457
11458 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11459}
11460
11461
11462
11463/**
11464 * Interface for HM and EM to write to a CRx register.
11465 *
11466 * @returns Strict VBox status code.
11467 * @param pVCpu The cross context virtual CPU structure.
11468 * @param cbInstr The instruction length in bytes.
11469 * @param iCrReg The control register number (destination).
11470 * @param iGReg The general purpose register number (source).
11471 *
11472 * @remarks In ring-0 not all of the state needs to be synced in.
11473 */
11474VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11475{
11476 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11477 Assert(iCrReg < 16);
11478 Assert(iGReg < 16);
11479
11480 PIEMCPU pIemCpu = &pVCpu->iem.s;
11481 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11482 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11483 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11484}
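
/*
 * Usage sketch (hypothetical, not part of the build): emulating an intercepted
 * "mov cr3, rax" would boil down to the call below, with cbInstr taken from
 * the exit information (iCrReg = 3 for CR3, iGReg = 0 for RAX):
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3, 0);
 */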
11485
11486
11487/**
11488 * Interface for HM and EM to read from a CRx register.
11489 *
11490 * @returns Strict VBox status code.
11491 * @param pVCpu The cross context virtual CPU structure.
11492 * @param cbInstr The instruction length in bytes.
11493 * @param iGReg The general purpose register number (destination).
11494 * @param iCrReg The control register number (source).
11495 *
11496 * @remarks In ring-0 not all of the state needs to be synced in.
11497 */
11498VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11499{
11500 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11501 Assert(iCrReg < 16);
11502 Assert(iGReg < 16);
11503
11504 PIEMCPU pIemCpu = &pVCpu->iem.s;
11505 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11506 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11507 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11508}
11509
11510
11511/**
11512 * Interface for HM and EM to clear the CR0[TS] bit.
11513 *
11514 * @returns Strict VBox status code.
11515 * @param pVCpu The cross context virtual CPU structure.
11516 * @param cbInstr The instruction length in bytes.
11517 *
11518 * @remarks In ring-0 not all of the state needs to be synced in.
11519 */
11520VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11521{
11522 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11523
11524 PIEMCPU pIemCpu = &pVCpu->iem.s;
11525 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11526 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11527 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11528}
11529
11530
11531/**
11532 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11533 *
11534 * @returns Strict VBox status code.
11535 * @param pVCpu The cross context virtual CPU structure.
11536 * @param cbInstr The instruction length in bytes.
11537 * @param uValue The value to load into CR0.
11538 *
11539 * @remarks In ring-0 not all of the state needs to be synced in.
11540 */
11541VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11542{
11543 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11544
11545 PIEMCPU pIemCpu = &pVCpu->iem.s;
11546 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11547 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11548 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11549}
11550
11551
11552/**
11553 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11554 *
11555 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11556 *
11557 * @returns Strict VBox status code.
11558 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11559 * @param cbInstr The instruction length in bytes.
11560 * @remarks In ring-0 not all of the state needs to be synced in.
11561 * @thread EMT(pVCpu)
11562 */
11563VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11564{
11565 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11566
11567 PIEMCPU pIemCpu = &pVCpu->iem.s;
11568 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11569 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11570 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11571}
11572
11573#ifdef IN_RING3
11574
11575/**
11576 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11577 *
11578 * @returns Merge between @a rcStrict and what the commit operation returned.
11579 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11580 * @param rcStrict The status code returned by ring-0 or raw-mode.
11581 */
11582VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11583{
11584 PIEMCPU pIemCpu = &pVCpu->iem.s;
11585
11586 /*
11587 * Retrieve and reset the pending commit.
11588 */
11589 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11590 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11591 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11592
11593 /*
11594 * Must reset pass-up status code.
11595 */
11596 pIemCpu->rcPassUp = VINF_SUCCESS;
11597
11598 /*
11599     * Call the function. Currently using a switch here instead of a function
11600     * pointer table, as a switch won't get skewed.
11601 */
11602 VBOXSTRICTRC rcStrictCommit;
11603 switch (enmFn)
11604 {
11605 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11606 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11607 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11608 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11609 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11610 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11611 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11612 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11613 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11614 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11615 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11616 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11617 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11618 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11619 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11620 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11621 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11622 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11623 default:
11624 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11625 }
11626
11627 /*
11628     * Merge the status code (if any) with the incoming one.
11629 */
11630 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11631 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11632 return rcStrict;
11633 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11634 return rcStrictCommit;
11635
11636 /* Complicated. */
11637 if (RT_FAILURE(rcStrict))
11638 return rcStrict;
11639 if (RT_FAILURE(rcStrictCommit))
11640 return rcStrictCommit;
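    /* Both are informational at this point; when both fall in the EM
       scheduling range the numerically lower (more urgent) one wins below. */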
11641 if ( rcStrict >= VINF_EM_FIRST
11642 && rcStrict <= VINF_EM_LAST)
11643 {
11644 if ( rcStrictCommit >= VINF_EM_FIRST
11645 && rcStrictCommit <= VINF_EM_LAST)
11646 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11647
11648 /* This really shouldn't happen. Check PGM + handler code! */
11649 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11650 }
11651 /* This shouldn't really happen either, see IOM_SUCCESS. */
11652 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11653}
11654
11655#endif /* IN_RING3 */
11656