VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 57350

Last change on this file since 57350 was 57237, checked in by vboxsync, 9 years ago

Fix doxygen bugref w/ comments to use the URL format.

1/* $Id: IEMAll.cpp 57237 2015-08-07 10:24:50Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78#define IEM_IMPLEMENTS_TASKSWITCH
79
80/*******************************************************************************
81* Header Files *
82*******************************************************************************/
83#define LOG_GROUP LOG_GROUP_IEM
84#include <VBox/vmm/iem.h>
85#include <VBox/vmm/cpum.h>
86#include <VBox/vmm/pdm.h>
87#include <VBox/vmm/pgm.h>
88#include <internal/pgm.h>
89#include <VBox/vmm/iom.h>
90#include <VBox/vmm/em.h>
91#include <VBox/vmm/hm.h>
92#include <VBox/vmm/tm.h>
93#include <VBox/vmm/dbgf.h>
94#include <VBox/vmm/dbgftrace.h>
95#ifdef VBOX_WITH_RAW_MODE_NOT_R0
96# include <VBox/vmm/patm.h>
97# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
98# include <VBox/vmm/csam.h>
99# endif
100#endif
101#include "IEMInternal.h"
102#ifdef IEM_VERIFICATION_MODE_FULL
103# include <VBox/vmm/rem.h>
104# include <VBox/vmm/mm.h>
105#endif
106#include <VBox/vmm/vm.h>
107#include <VBox/log.h>
108#include <VBox/err.h>
109#include <VBox/param.h>
110#include <VBox/dis.h>
111#include <VBox/disopcode.h>
112#include <iprt/assert.h>
113#include <iprt/string.h>
114#include <iprt/x86.h>
115
116
117
118/*******************************************************************************
119* Structures and Typedefs *
120*******************************************************************************/
121/** @typedef PFNIEMOP
122 * Pointer to an opcode decoder function.
123 */
124
125/** @def FNIEMOP_DEF
126 * Define an opcode decoder function.
127 *
128 * We're using macros for this so that adding and removing parameters, as well as
129 * tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL.
130 *
131 * @param a_Name The function name.
132 */
133
134
135#if defined(__GNUC__) && defined(RT_ARCH_X86)
136typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
137# define FNIEMOP_DEF(a_Name) \
138 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
139# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
140 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
141# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
142 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
143
144#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
145typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
146# define FNIEMOP_DEF(a_Name) \
147 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
148# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
149 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
150# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
151 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
152
153#elif defined(__GNUC__)
154typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#else
163typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
170
171#endif
172
173
174/**
175 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
176 */
177typedef union IEMSELDESC
178{
179 /** The legacy view. */
180 X86DESC Legacy;
181 /** The long mode view. */
182 X86DESC64 Long;
183} IEMSELDESC;
184/** Pointer to a selector descriptor table entry. */
185typedef IEMSELDESC *PIEMSELDESC;
186
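/*
 * Illustrative sketch (not part of the original file): a fragment showing how a
 * descriptor fetched into an IEMSELDESC is typically inspected.  It assumes it
 * runs inside an instruction implementation where pIemCpu is in scope; the
 * selector value is hypothetical and the X86DESC generic-view field names
 * (Gen.u1Present, Gen.u2Dpl) are assumed from iprt/x86.h.
 */
#if 0
uint16_t const uSel = 0x0008;                               /* hypothetical selector */
IEMSELDESC     Desc;
VBOXSTRICTRC   rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
if (rcStrict == VINF_SUCCESS)
    Log(("sel %#x: P=%u DPL=%u\n", uSel, Desc.Legacy.Gen.u1Present, Desc.Legacy.Gen.u2Dpl));
#endif
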
187
188/*******************************************************************************
189* Defined Constants And Macros *
190*******************************************************************************/
191/** Temporary hack to disable the double execution. Will be removed in favor
192 * of a dedicated execution mode in EM. */
193//#define IEM_VERIFICATION_MODE_NO_REM
194
195/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
196 * due to GCC lacking knowledge about the value range of a switch. */
197#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
198
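/*
 * Illustrative sketch (not part of the original file): the macro above closes a
 * fully enumerated switch so GCC neither warns about cbValue possibly being
 * used uninitialized nor about falling off the end.  The function name and
 * cbValue are hypothetical.
 */
#if 0
FNIEMOP_DEF(iemOp_example_switchOnOpSize)
{
    uint8_t cbValue;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT: cbValue = 2; break;
        case IEMMODE_32BIT: cbValue = 4; break;
        case IEMMODE_64BIT: cbValue = 8; break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    Log(("example: effective operand size is %u bytes\n", cbValue));
    return VINF_SUCCESS;
}
#endif
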
199/**
200 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
201 * occasion.
202 */
203#ifdef LOG_ENABLED
204# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
205 do { \
206 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
207 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
208 } while (0)
209#else
210# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
212#endif
213
214/**
215 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
216 * occasion using the supplied logger statement.
217 *
218 * @param a_LoggerArgs What to log on failure.
219 */
220#ifdef LOG_ENABLED
221# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
222 do { \
223 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
224 /*LogFunc(a_LoggerArgs);*/ \
225 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
226 } while (0)
227#else
228# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
230#endif
231
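/*
 * Illustrative sketch (not part of the original file): a decoder stub for a CPU
 * aspect IEM does not implement yet would typically bail out like this.  The
 * opcode function name is hypothetical.
 */
#if 0
FNIEMOP_DEF(iemOp_example_notImplementedYet)
{
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example: aspect not implemented\n"));
}
#endif
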
232/**
233 * Call an opcode decoder function.
234 *
235 * We're using macros for this so that adding and removing parameters can be
236 * done as we please. See FNIEMOP_DEF.
237 */
238#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
239
240/**
241 * Call a common opcode decoder function taking one extra argument.
242 *
243 * We're using macros for this so that adding and removing parameters can be
244 * done as we please. See FNIEMOP_DEF_1.
245 */
246#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
247
248/**
249 * Call a common opcode decoder function taking two extra arguments.
250 *
251 * We're using macros for this so that adding and removing parameters can be
252 * done as we please. See FNIEMOP_DEF_2.
253 */
254#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
255
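/*
 * Illustrative sketch (not part of the original file): defining decoder
 * functions with FNIEMOP_DEF / FNIEMOP_DEF_1 and forwarding between them with
 * FNIEMOP_CALL_1, which passes pIemCpu along implicitly.  The names and the
 * ModR/M byte value are hypothetical.
 */
#if 0
FNIEMOP_DEF_1(iemOp_example_worker, uint8_t, bRm)
{
    NOREF(bRm);
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOp_example_dispatcher)
{
    /* pIemCpu is in scope here, which is what the call macro relies on. */
    return FNIEMOP_CALL_1(iemOp_example_worker, 0xc0 /* example ModR/M byte */);
}
#endif
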
256/**
257 * Check if we're currently executing in real or virtual 8086 mode.
258 *
259 * @returns @c true if it is, @c false if not.
260 * @param a_pIemCpu The IEM state of the current CPU.
261 */
262#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
263
264/**
265 * Check if we're currently executing in virtual 8086 mode.
266 *
267 * @returns @c true if it is, @c false if not.
268 * @param a_pIemCpu The IEM state of the current CPU.
269 */
270#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
271
272/**
273 * Check if we're currently executing in long mode.
274 *
275 * @returns @c true if it is, @c false if not.
276 * @param a_pIemCpu The IEM state of the current CPU.
277 */
278#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
279
280/**
281 * Check if we're currently executing in real mode.
282 *
283 * @returns @c true if it is, @c false if not.
284 * @param a_pIemCpu The IEM state of the current CPU.
285 */
286#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
287
288/**
289 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
290 * @returns PCCPUMFEATURES
291 * @param a_pIemCpu The IEM state of the current CPU.
292 */
293#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
294
295/**
296 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
297 * @returns PCCPUMFEATURES
298 * @param a_pIemCpu The IEM state of the current CPU.
299 */
300#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
301
302/**
303 * Evaluates to true if we're presenting an Intel CPU to the guest.
304 */
305#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
306
307/**
308 * Evaluates to true if we're presenting an AMD CPU to the guest.
309 */
310#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
311
312/**
313 * Check if the address is canonical.
314 */
315#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
316
317
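/*
 * Illustrative sketch (not part of the original file): typical use of the mode
 * and vendor predicates above inside a decoder.  The opcode function name is
 * hypothetical.
 */
#if 0
FNIEMOP_DEF(iemOp_example_modeDependent)
{
    if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
        Log(("example: real/V86 mode semantics\n"));
    else if (IEM_IS_LONG_MODE(pIemCpu))
        Log(("example: long mode semantics\n"));
    if (IEM_IS_GUEST_CPU_AMD(pIemCpu))
        Log(("example: following AMD-specific flag behaviour\n"));
    return VINF_SUCCESS;
}
#endif
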
318/*******************************************************************************
319* Global Variables *
320*******************************************************************************/
321extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
322
323
324/** Function table for the ADD instruction. */
325IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
326{
327 iemAImpl_add_u8, iemAImpl_add_u8_locked,
328 iemAImpl_add_u16, iemAImpl_add_u16_locked,
329 iemAImpl_add_u32, iemAImpl_add_u32_locked,
330 iemAImpl_add_u64, iemAImpl_add_u64_locked
331};
332
333/** Function table for the ADC instruction. */
334IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
335{
336 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
337 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
338 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
339 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
340};
341
342/** Function table for the SUB instruction. */
343IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
344{
345 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
346 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
347 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
348 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
349};
350
351/** Function table for the SBB instruction. */
352IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
353{
354 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
355 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
356 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
357 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
358};
359
360/** Function table for the OR instruction. */
361IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
362{
363 iemAImpl_or_u8, iemAImpl_or_u8_locked,
364 iemAImpl_or_u16, iemAImpl_or_u16_locked,
365 iemAImpl_or_u32, iemAImpl_or_u32_locked,
366 iemAImpl_or_u64, iemAImpl_or_u64_locked
367};
368
369/** Function table for the XOR instruction. */
370IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
371{
372 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
373 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
374 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
375 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
376};
377
378/** Function table for the AND instruction. */
379IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
380{
381 iemAImpl_and_u8, iemAImpl_and_u8_locked,
382 iemAImpl_and_u16, iemAImpl_and_u16_locked,
383 iemAImpl_and_u32, iemAImpl_and_u32_locked,
384 iemAImpl_and_u64, iemAImpl_and_u64_locked
385};
386
387/** Function table for the CMP instruction.
388 * @remarks Making operand order ASSUMPTIONS.
389 */
390IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
391{
392 iemAImpl_cmp_u8, NULL,
393 iemAImpl_cmp_u16, NULL,
394 iemAImpl_cmp_u32, NULL,
395 iemAImpl_cmp_u64, NULL
396};
397
398/** Function table for the TEST instruction.
399 * @remarks Making operand order ASSUMPTIONS.
400 */
401IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
402{
403 iemAImpl_test_u8, NULL,
404 iemAImpl_test_u16, NULL,
405 iemAImpl_test_u32, NULL,
406 iemAImpl_test_u64, NULL
407};
408
409/** Function table for the BT instruction. */
410IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
411{
412 NULL, NULL,
413 iemAImpl_bt_u16, NULL,
414 iemAImpl_bt_u32, NULL,
415 iemAImpl_bt_u64, NULL
416};
417
418/** Function table for the BTC instruction. */
419IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
420{
421 NULL, NULL,
422 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
423 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
424 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
425};
426
427/** Function table for the BTR instruction. */
428IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
429{
430 NULL, NULL,
431 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
432 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
433 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
434};
435
436/** Function table for the BTS instruction. */
437IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
438{
439 NULL, NULL,
440 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
441 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
442 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
443};
444
445/** Function table for the BSF instruction. */
446IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
447{
448 NULL, NULL,
449 iemAImpl_bsf_u16, NULL,
450 iemAImpl_bsf_u32, NULL,
451 iemAImpl_bsf_u64, NULL
452};
453
454/** Function table for the BSR instruction. */
455IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
456{
457 NULL, NULL,
458 iemAImpl_bsr_u16, NULL,
459 iemAImpl_bsr_u32, NULL,
460 iemAImpl_bsr_u64, NULL
461};
462
463/** Function table for the IMUL instruction. */
464IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
465{
466 NULL, NULL,
467 iemAImpl_imul_two_u16, NULL,
468 iemAImpl_imul_two_u32, NULL,
469 iemAImpl_imul_two_u64, NULL
470};
471
472/** Group 1 /r lookup table. */
473IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
474{
475 &g_iemAImpl_add,
476 &g_iemAImpl_or,
477 &g_iemAImpl_adc,
478 &g_iemAImpl_sbb,
479 &g_iemAImpl_and,
480 &g_iemAImpl_sub,
481 &g_iemAImpl_xor,
482 &g_iemAImpl_cmp
483};
484
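/*
 * Illustrative sketch (not part of the original file): group 1 opcodes
 * (0x80..0x83) select their worker table via the ModR/M reg field (bits 5:3),
 * /0 being ADD and /7 being CMP.  The decoder function name is hypothetical.
 */
#if 0
FNIEMOP_DEF_1(iemOp_example_Grp1, uint8_t, bRm)
{
    PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
    NOREF(pImpl); /* a real decoder would now pick the worker matching the effective operand size */
    return VINF_SUCCESS;
}
#endif
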
485/** Function table for the INC instruction. */
486IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
487{
488 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
489 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
490 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
491 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
492};
493
494/** Function table for the DEC instruction. */
495IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
496{
497 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
498 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
499 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
500 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
501};
502
503/** Function table for the NEG instruction. */
504IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
505{
506 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
507 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
508 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
509 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
510};
511
512/** Function table for the NOT instruction. */
513IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
514{
515 iemAImpl_not_u8, iemAImpl_not_u8_locked,
516 iemAImpl_not_u16, iemAImpl_not_u16_locked,
517 iemAImpl_not_u32, iemAImpl_not_u32_locked,
518 iemAImpl_not_u64, iemAImpl_not_u64_locked
519};
520
521
522/** Function table for the ROL instruction. */
523IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
524{
525 iemAImpl_rol_u8,
526 iemAImpl_rol_u16,
527 iemAImpl_rol_u32,
528 iemAImpl_rol_u64
529};
530
531/** Function table for the ROR instruction. */
532IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
533{
534 iemAImpl_ror_u8,
535 iemAImpl_ror_u16,
536 iemAImpl_ror_u32,
537 iemAImpl_ror_u64
538};
539
540/** Function table for the RCL instruction. */
541IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
542{
543 iemAImpl_rcl_u8,
544 iemAImpl_rcl_u16,
545 iemAImpl_rcl_u32,
546 iemAImpl_rcl_u64
547};
548
549/** Function table for the RCR instruction. */
550IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
551{
552 iemAImpl_rcr_u8,
553 iemAImpl_rcr_u16,
554 iemAImpl_rcr_u32,
555 iemAImpl_rcr_u64
556};
557
558/** Function table for the SHL instruction. */
559IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
560{
561 iemAImpl_shl_u8,
562 iemAImpl_shl_u16,
563 iemAImpl_shl_u32,
564 iemAImpl_shl_u64
565};
566
567/** Function table for the SHR instruction. */
568IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
569{
570 iemAImpl_shr_u8,
571 iemAImpl_shr_u16,
572 iemAImpl_shr_u32,
573 iemAImpl_shr_u64
574};
575
576/** Function table for the SAR instruction. */
577IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
578{
579 iemAImpl_sar_u8,
580 iemAImpl_sar_u16,
581 iemAImpl_sar_u32,
582 iemAImpl_sar_u64
583};
584
585
586/** Function table for the MUL instruction. */
587IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
588{
589 iemAImpl_mul_u8,
590 iemAImpl_mul_u16,
591 iemAImpl_mul_u32,
592 iemAImpl_mul_u64
593};
594
595/** Function table for the IMUL instruction working implicitly on rAX. */
596IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
597{
598 iemAImpl_imul_u8,
599 iemAImpl_imul_u16,
600 iemAImpl_imul_u32,
601 iemAImpl_imul_u64
602};
603
604/** Function table for the DIV instruction. */
605IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
606{
607 iemAImpl_div_u8,
608 iemAImpl_div_u16,
609 iemAImpl_div_u32,
610 iemAImpl_div_u64
611};
612
613/** Function table for the IDIV instruction. */
614IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
615{
616 iemAImpl_idiv_u8,
617 iemAImpl_idiv_u16,
618 iemAImpl_idiv_u32,
619 iemAImpl_idiv_u64
620};
621
622/** Function table for the SHLD instruction */
623IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
624{
625 iemAImpl_shld_u16,
626 iemAImpl_shld_u32,
627 iemAImpl_shld_u64,
628};
629
630/** Function table for the SHRD instruction */
631IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
632{
633 iemAImpl_shrd_u16,
634 iemAImpl_shrd_u32,
635 iemAImpl_shrd_u64,
636};
637
638
639/** Function table for the PUNPCKLBW instruction */
640IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
642/** Function table for the PUNPCKLWD instruction */
642IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
643/** Function table for the PUNPCKLDQ instruction */
644IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
645/** Function table for the PUNPCKLQDQ instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
647
648/** Function table for the PUNPCKHBW instruction */
649IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
651/** Function table for the PUNPCKHWD instruction */
651IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
652/** Function table for the PUNPCKHDQ instruction */
653IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
654/** Function table for the PUNPCKHQDQ instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
656
657/** Function table for the PXOR instruction */
658IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
659/** Function table for the PCMPEQB instruction */
660IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
661/** Function table for the PCMPEQW instruction */
662IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
663/** Function table for the PCMPEQD instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
665
666
667#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
668/** What IEM just wrote. */
669uint8_t g_abIemWrote[256];
670/** How much IEM just wrote. */
671size_t g_cbIemWrote;
672#endif
673
674
675/*******************************************************************************
676* Internal Functions *
677*******************************************************************************/
678IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
679IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
680IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
681IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
682/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
683IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
684IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
685IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
687IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
688IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
689IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
691IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
692IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
693IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
694IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
695IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
696IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
698IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
699IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
700IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
701IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
705IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
706IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
707IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
708IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
709IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
710IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
711
712#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
713IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
714#endif
715IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
716IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
717
718
719
720/**
721 * Sets the pass up status.
722 *
723 * @returns VINF_SUCCESS.
724 * @param pIemCpu The per CPU IEM state of the calling thread.
725 * @param rcPassUp The pass up status. Must be informational.
726 * VINF_SUCCESS is not allowed.
727 */
728IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
729{
730 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
731
732 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
733 if (rcOldPassUp == VINF_SUCCESS)
734 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
735 /* If both are EM scheduling codes, use EM priority rules. */
736 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
737 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
738 {
739 if (rcPassUp < rcOldPassUp)
740 {
741 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
742 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
743 }
744 else
745 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 }
747 /* Override EM scheduling with specific status code. */
748 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
749 {
750 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
751 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
752 }
753 /* Don't override specific status code, first come first served. */
754 else
755 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
756 return VINF_SUCCESS;
757}
758
759
760/**
761 * Initializes the execution state.
762 *
763 * @param pIemCpu The per CPU IEM state.
764 * @param fBypassHandlers Whether to bypass access handlers.
765 */
766DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
767{
768 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
769 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
770
771 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
772 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
773
774#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
775 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
776 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
778 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
780 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
781 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
782 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
783#endif
784
785#ifdef VBOX_WITH_RAW_MODE_NOT_R0
786 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
787#endif
788 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
789 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
790 ? IEMMODE_64BIT
791 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
792 ? IEMMODE_32BIT
793 : IEMMODE_16BIT;
794 pIemCpu->enmCpuMode = enmMode;
795#ifdef VBOX_STRICT
796 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
797 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
798 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
799 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
800 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
801 pIemCpu->uRexReg = 127;
802 pIemCpu->uRexB = 127;
803 pIemCpu->uRexIndex = 127;
804 pIemCpu->iEffSeg = 127;
805 pIemCpu->offOpcode = 127;
806 pIemCpu->cbOpcode = 127;
807#endif
808
809 pIemCpu->cActiveMappings = 0;
810 pIemCpu->iNextMapping = 0;
811 pIemCpu->rcPassUp = VINF_SUCCESS;
812 pIemCpu->fBypassHandlers = fBypassHandlers;
813#ifdef VBOX_WITH_RAW_MODE_NOT_R0
814 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
815 && pCtx->cs.u64Base == 0
816 && pCtx->cs.u32Limit == UINT32_MAX
817 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
818 if (!pIemCpu->fInPatchCode)
819 CPUMRawLeave(pVCpu, VINF_SUCCESS);
820#endif
821}
822
823
824/**
825 * Initializes the decoder state.
826 *
827 * @param pIemCpu The per CPU IEM state.
828 * @param fBypassHandlers Whether to bypass access handlers.
829 */
830DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
831{
832 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
833 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
834
835 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
836 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
837
838#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
839 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
840 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
841 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
842 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
843 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
847#endif
848
849#ifdef VBOX_WITH_RAW_MODE_NOT_R0
850 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
851#endif
852 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
853#ifdef IEM_VERIFICATION_MODE_FULL
854 if (pIemCpu->uInjectCpl != UINT8_MAX)
855 pIemCpu->uCpl = pIemCpu->uInjectCpl;
856#endif
857 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
858 ? IEMMODE_64BIT
859 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
860 ? IEMMODE_32BIT
861 : IEMMODE_16BIT;
862 pIemCpu->enmCpuMode = enmMode;
863 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
864 pIemCpu->enmEffAddrMode = enmMode;
865 if (enmMode != IEMMODE_64BIT)
866 {
867 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
868 pIemCpu->enmEffOpSize = enmMode;
869 }
870 else
871 {
872 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
873 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
874 }
875 pIemCpu->fPrefixes = 0;
876 pIemCpu->uRexReg = 0;
877 pIemCpu->uRexB = 0;
878 pIemCpu->uRexIndex = 0;
879 pIemCpu->iEffSeg = X86_SREG_DS;
880 pIemCpu->offOpcode = 0;
881 pIemCpu->cbOpcode = 0;
882 pIemCpu->cActiveMappings = 0;
883 pIemCpu->iNextMapping = 0;
884 pIemCpu->rcPassUp = VINF_SUCCESS;
885 pIemCpu->fBypassHandlers = fBypassHandlers;
886#ifdef VBOX_WITH_RAW_MODE_NOT_R0
887 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
888 && pCtx->cs.u64Base == 0
889 && pCtx->cs.u32Limit == UINT32_MAX
890 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
891 if (!pIemCpu->fInPatchCode)
892 CPUMRawLeave(pVCpu, VINF_SUCCESS);
893#endif
894
895#ifdef DBGFTRACE_ENABLED
896 switch (enmMode)
897 {
898 case IEMMODE_64BIT:
899 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
900 break;
901 case IEMMODE_32BIT:
902 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
903 break;
904 case IEMMODE_16BIT:
905 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
906 break;
907 }
908#endif
909}
910
911
912/**
913 * Prefetches opcodes the first time when starting execution.
914 *
915 * @returns Strict VBox status code.
916 * @param pIemCpu The IEM state.
917 * @param fBypassHandlers Whether to bypass access handlers.
918 */
919IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
920{
921#ifdef IEM_VERIFICATION_MODE_FULL
922 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
923#endif
924 iemInitDecoder(pIemCpu, fBypassHandlers);
925
926 /*
927 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
928 *
929 * First translate CS:rIP to a physical address.
930 */
931 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
932 uint32_t cbToTryRead;
933 RTGCPTR GCPtrPC;
934 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
935 {
936 cbToTryRead = PAGE_SIZE;
937 GCPtrPC = pCtx->rip;
938 if (!IEM_IS_CANONICAL(GCPtrPC))
939 return iemRaiseGeneralProtectionFault0(pIemCpu);
940 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
941 }
942 else
943 {
944 uint32_t GCPtrPC32 = pCtx->eip;
945 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
946 if (GCPtrPC32 > pCtx->cs.u32Limit)
947 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
948 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
949 if (!cbToTryRead) /* overflowed */
950 {
951 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
952 cbToTryRead = UINT32_MAX;
953 }
954 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
955 Assert(GCPtrPC <= UINT32_MAX);
956 }
957
958#ifdef VBOX_WITH_RAW_MODE_NOT_R0
959 /* Allow interpretation of patch manager code blocks since they can for
960 instance throw #PFs for perfectly good reasons. */
961 if (pIemCpu->fInPatchCode)
962 {
963 size_t cbRead = 0;
964 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
965 AssertRCReturn(rc, rc);
966 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
967 return VINF_SUCCESS;
968 }
969#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
970
971 RTGCPHYS GCPhys;
972 uint64_t fFlags;
973 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
974 if (RT_FAILURE(rc))
975 {
976 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
977 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
978 }
979 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
980 {
981 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
982 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
983 }
984 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
985 {
986 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
987 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
988 }
989 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
990 /** @todo Check reserved bits and such stuff. PGM is better at doing
991 * that, so do it when implementing the guest virtual address
992 * TLB... */
993
994#ifdef IEM_VERIFICATION_MODE_FULL
995 /*
996 * Optimistic optimization: Use unconsumed opcode bytes from the previous
997 * instruction.
998 */
999 /** @todo optimize this differently by not using PGMPhysRead. */
1000 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1001 pIemCpu->GCPhysOpcodes = GCPhys;
1002 if ( offPrevOpcodes < cbOldOpcodes
1003 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1004 {
1005 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1006 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1007 pIemCpu->cbOpcode = cbNew;
1008 return VINF_SUCCESS;
1009 }
1010#endif
1011
1012 /*
1013 * Read the bytes at this address.
1014 */
1015 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1016#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1017 size_t cbActual;
1018 if ( PATMIsEnabled(pVM)
1019 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1020 {
1021 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1022 Assert(cbActual > 0);
1023 pIemCpu->cbOpcode = (uint8_t)cbActual;
1024 }
1025 else
1026#endif
1027 {
1028 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1029 if (cbToTryRead > cbLeftOnPage)
1030 cbToTryRead = cbLeftOnPage;
1031 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1032 cbToTryRead = sizeof(pIemCpu->abOpcode);
1033
1034 if (!pIemCpu->fBypassHandlers)
1035 {
1036 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1037 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1038 { /* likely */ }
1039 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1040 {
1041 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1042 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1043 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1044 }
1045 else
1046 {
1047 Log((RT_SUCCESS(rcStrict)
1048 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1049 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1050 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1051 return rcStrict;
1052 }
1053 }
1054 else
1055 {
1056 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1057 if (RT_SUCCESS(rc))
1058 { /* likely */ }
1059 else
1060 {
1061 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1062 GCPtrPC, GCPhys, cbToTryRead, rc));
1063 return rc;
1064 }
1065 }
1066 pIemCpu->cbOpcode = cbToTryRead;
1067 }
1068
1069 return VINF_SUCCESS;
1070}
1071
1072
1073/**
1074 * Tries to fetch at least @a cbMin bytes more opcodes, raising the appropriate
1075 * exception if it fails.
1076 *
1077 * @returns Strict VBox status code.
1078 * @param pIemCpu The IEM state.
1079 * @param cbMin The minimum number of bytes relative to offOpcode
1080 * that must be read.
1081 */
1082IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1083{
1084 /*
1085 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1086 *
1087 * First translate CS:rIP to a physical address.
1088 */
1089 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1090 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1091 uint32_t cbToTryRead;
1092 RTGCPTR GCPtrNext;
1093 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1094 {
1095 cbToTryRead = PAGE_SIZE;
1096 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1097 if (!IEM_IS_CANONICAL(GCPtrNext))
1098 return iemRaiseGeneralProtectionFault0(pIemCpu);
1099 }
1100 else
1101 {
1102 uint32_t GCPtrNext32 = pCtx->eip;
1103 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1104 GCPtrNext32 += pIemCpu->cbOpcode;
1105 if (GCPtrNext32 > pCtx->cs.u32Limit)
1106 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1107 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1108 if (!cbToTryRead) /* overflowed */
1109 {
1110 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1111 cbToTryRead = UINT32_MAX;
1112 /** @todo check out wrapping around the code segment. */
1113 }
1114 if (cbToTryRead < cbMin - cbLeft)
1115 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1116 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1117 }
1118
1119 /* Only read up to the end of the page, and make sure we don't read more
1120 than the opcode buffer can hold. */
1121 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1122 if (cbToTryRead > cbLeftOnPage)
1123 cbToTryRead = cbLeftOnPage;
1124 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1125 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1126/** @todo r=bird: Convert assertion into undefined opcode exception? */
1127 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1128
1129#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1130 /* Allow interpretation of patch manager code blocks since they can for
1131 instance throw #PFs for perfectly good reasons. */
1132 if (pIemCpu->fInPatchCode)
1133 {
1134 size_t cbRead = 0;
1135 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1136 AssertRCReturn(rc, rc);
1137 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1138 return VINF_SUCCESS;
1139 }
1140#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1141
1142 RTGCPHYS GCPhys;
1143 uint64_t fFlags;
1144 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1145 if (RT_FAILURE(rc))
1146 {
1147 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1148 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1149 }
1150 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1151 {
1152 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1153 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1154 }
1155 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1156 {
1157 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1158 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1159 }
1160 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1161 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1162 /** @todo Check reserved bits and such stuff. PGM is better at doing
1163 * that, so do it when implementing the guest virtual address
1164 * TLB... */
1165
1166 /*
1167 * Read the bytes at this address.
1168 *
1169 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1170 * and since PATM should only patch the start of an instruction there
1171 * should be no need to check again here.
1172 */
1173 if (!pIemCpu->fBypassHandlers)
1174 {
1175 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1176 cbToTryRead, PGMACCESSORIGIN_IEM);
1177 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1178 { /* likely */ }
1179 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1180 {
1181 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1182 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1183 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1184 }
1185 else
1186 {
1187 Log((RT_SUCCESS(rcStrict)
1188 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1189 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1190 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1191 return rcStrict;
1192 }
1193 }
1194 else
1195 {
1196 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1197 if (RT_SUCCESS(rc))
1198 { /* likely */ }
1199 else
1200 {
1201 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1202 return rc;
1203 }
1204 }
1205 pIemCpu->cbOpcode += cbToTryRead;
1206 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1207
1208 return VINF_SUCCESS;
1209}
1210
1211
1212/**
1213 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1214 *
1215 * @returns Strict VBox status code.
1216 * @param pIemCpu The IEM state.
1217 * @param pb Where to return the opcode byte.
1218 */
1219DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1220{
1221 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1222 if (rcStrict == VINF_SUCCESS)
1223 {
1224 uint8_t offOpcode = pIemCpu->offOpcode;
1225 *pb = pIemCpu->abOpcode[offOpcode];
1226 pIemCpu->offOpcode = offOpcode + 1;
1227 }
1228 else
1229 *pb = 0;
1230 return rcStrict;
1231}
1232
1233
1234/**
1235 * Fetches the next opcode byte.
1236 *
1237 * @returns Strict VBox status code.
1238 * @param pIemCpu The IEM state.
1239 * @param pu8 Where to return the opcode byte.
1240 */
1241DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1242{
1243 uint8_t const offOpcode = pIemCpu->offOpcode;
1244 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1245 {
1246 *pu8 = pIemCpu->abOpcode[offOpcode];
1247 pIemCpu->offOpcode = offOpcode + 1;
1248 return VINF_SUCCESS;
1249 }
1250 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1251}
1252
1253
1254/**
1255 * Fetches the next opcode byte, returns automatically on failure.
1256 *
1257 * @param a_pu8 Where to return the opcode byte.
1258 * @remark Implicitly references pIemCpu.
1259 */
1260#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1261 do \
1262 { \
1263 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1264 if (rcStrict2 != VINF_SUCCESS) \
1265 return rcStrict2; \
1266 } while (0)
1267
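/*
 * Illustrative sketch (not part of the original file): the fetch macros are
 * meant for decoder functions where pIemCpu is in scope, returning from the
 * caller on any non-VINF_SUCCESS status.  The opcode function name is
 * hypothetical.
 */
#if 0
FNIEMOP_DEF(iemOp_example_usesFetchMacro)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns from this function on failure */
    Log4(("example: ModR/M byte %#x\n", bRm));
    return VINF_SUCCESS;
}
#endif
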
1268
1269/**
1270 * Fetches the next signed byte from the opcode stream.
1271 *
1272 * @returns Strict VBox status code.
1273 * @param pIemCpu The IEM state.
1274 * @param pi8 Where to return the signed byte.
1275 */
1276DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1277{
1278 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1279}
1280
1281
1282/**
1283 * Fetches the next signed byte from the opcode stream, returning automatically
1284 * on failure.
1285 *
1286 * @param a_pi8 Where to return the signed byte.
1287 * @remark Implicitly references pIemCpu.
1288 */
1289#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1290 do \
1291 { \
1292 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1293 if (rcStrict2 != VINF_SUCCESS) \
1294 return rcStrict2; \
1295 } while (0)
1296
1297
1298/**
1299 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1300 *
1301 * @returns Strict VBox status code.
1302 * @param pIemCpu The IEM state.
1303 * @param pu16 Where to return the opcode word.
1304 */
1305DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1306{
1307 uint8_t u8;
1308 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1309 if (rcStrict == VINF_SUCCESS)
1310 *pu16 = (int8_t)u8;
1311 return rcStrict;
1312}
1313
1314
1315/**
1316 * Fetches the next signed byte from the opcode stream, extending it to
1317 * unsigned 16-bit.
1318 *
1319 * @returns Strict VBox status code.
1320 * @param pIemCpu The IEM state.
1321 * @param pu16 Where to return the unsigned word.
1322 */
1323DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1324{
1325 uint8_t const offOpcode = pIemCpu->offOpcode;
1326 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1327 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1328
1329 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1330 pIemCpu->offOpcode = offOpcode + 1;
1331 return VINF_SUCCESS;
1332}
1333
1334
1335/**
1336 * Fetches the next signed byte from the opcode stream, sign-extending it to
1337 * a word, returning automatically on failure.
1338 *
1339 * @param a_pu16 Where to return the word.
1340 * @remark Implicitly references pIemCpu.
1341 */
1342#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1343 do \
1344 { \
1345 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1346 if (rcStrict2 != VINF_SUCCESS) \
1347 return rcStrict2; \
1348 } while (0)
1349
1350
1351/**
1352 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1353 *
1354 * @returns Strict VBox status code.
1355 * @param pIemCpu The IEM state.
1356 * @param pu32 Where to return the opcode dword.
1357 */
1358DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1359{
1360 uint8_t u8;
1361 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1362 if (rcStrict == VINF_SUCCESS)
1363 *pu32 = (int8_t)u8;
1364 return rcStrict;
1365}
1366
1367
1368/**
1369 * Fetches the next signed byte from the opcode stream, extending it to
1370 * unsigned 32-bit.
1371 *
1372 * @returns Strict VBox status code.
1373 * @param pIemCpu The IEM state.
1374 * @param pu32 Where to return the unsigned dword.
1375 */
1376DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1377{
1378 uint8_t const offOpcode = pIemCpu->offOpcode;
1379 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1380 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1381
1382 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1383 pIemCpu->offOpcode = offOpcode + 1;
1384 return VINF_SUCCESS;
1385}
1386
1387
1388/**
1389 * Fetches the next signed byte from the opcode stream, sign-extending it to
1390 * a double word, returning automatically on failure.
1391 *
1392 * @param a_pu32 Where to return the double word.
1393 * @remark Implicitly references pIemCpu.
1394 */
1395#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1396 do \
1397 { \
1398 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1399 if (rcStrict2 != VINF_SUCCESS) \
1400 return rcStrict2; \
1401 } while (0)
1402
1403
1404/**
1405 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1406 *
1407 * @returns Strict VBox status code.
1408 * @param pIemCpu The IEM state.
1409 * @param pu64 Where to return the opcode qword.
1410 */
1411DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1412{
1413 uint8_t u8;
1414 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1415 if (rcStrict == VINF_SUCCESS)
1416 *pu64 = (int8_t)u8;
1417 return rcStrict;
1418}
1419
1420
1421/**
1422 * Fetches the next signed byte from the opcode stream, extending it to
1423 * unsigned 64-bit.
1424 *
1425 * @returns Strict VBox status code.
1426 * @param pIemCpu The IEM state.
1427 * @param pu64 Where to return the unsigned qword.
1428 */
1429DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1430{
1431 uint8_t const offOpcode = pIemCpu->offOpcode;
1432 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1433 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1434
1435 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1436 pIemCpu->offOpcode = offOpcode + 1;
1437 return VINF_SUCCESS;
1438}
1439
1440
1441/**
1442 * Fetches the next signed byte from the opcode stream, sign-extending it to
1443 * a quad word, returning automatically on failure.
1444 *
1445 * @param a_pu64 Where to return the quad word.
1446 * @remark Implicitly references pIemCpu.
1447 */
1448#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1449 do \
1450 { \
1451 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1452 if (rcStrict2 != VINF_SUCCESS) \
1453 return rcStrict2; \
1454 } while (0)
1455
1456
1457/**
1458 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1459 *
1460 * @returns Strict VBox status code.
1461 * @param pIemCpu The IEM state.
1462 * @param pu16 Where to return the opcode word.
1463 */
1464DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1465{
1466 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1467 if (rcStrict == VINF_SUCCESS)
1468 {
1469 uint8_t offOpcode = pIemCpu->offOpcode;
1470 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1471 pIemCpu->offOpcode = offOpcode + 2;
1472 }
1473 else
1474 *pu16 = 0;
1475 return rcStrict;
1476}
1477
1478
1479/**
1480 * Fetches the next opcode word.
1481 *
1482 * @returns Strict VBox status code.
1483 * @param pIemCpu The IEM state.
1484 * @param pu16 Where to return the opcode word.
1485 */
1486DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1487{
1488 uint8_t const offOpcode = pIemCpu->offOpcode;
1489 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1490 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1491
1492 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1493 pIemCpu->offOpcode = offOpcode + 2;
1494 return VINF_SUCCESS;
1495}
1496
1497
1498/**
1499 * Fetches the next opcode word, returns automatically on failure.
1500 *
1501 * @param a_pu16 Where to return the opcode word.
1502 * @remark Implicitly references pIemCpu.
1503 */
1504#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1505 do \
1506 { \
1507 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1508 if (rcStrict2 != VINF_SUCCESS) \
1509 return rcStrict2; \
1510 } while (0)
1511
1512
1513/**
1514 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1515 *
1516 * @returns Strict VBox status code.
1517 * @param pIemCpu The IEM state.
1518 * @param pu32 Where to return the opcode double word.
1519 */
1520DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1521{
1522 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1523 if (rcStrict == VINF_SUCCESS)
1524 {
1525 uint8_t offOpcode = pIemCpu->offOpcode;
1526 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1527 pIemCpu->offOpcode = offOpcode + 2;
1528 }
1529 else
1530 *pu32 = 0;
1531 return rcStrict;
1532}
1533
1534
1535/**
1536 * Fetches the next opcode word, zero extending it to a double word.
1537 *
1538 * @returns Strict VBox status code.
1539 * @param pIemCpu The IEM state.
1540 * @param pu32 Where to return the opcode double word.
1541 */
1542DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1543{
1544 uint8_t const offOpcode = pIemCpu->offOpcode;
1545 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1546 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1547
1548 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1549 pIemCpu->offOpcode = offOpcode + 2;
1550 return VINF_SUCCESS;
1551}
1552
1553
1554/**
1555 * Fetches the next opcode word and zero extends it to a double word, returns
1556 * automatically on failure.
1557 *
1558 * @param a_pu32 Where to return the opcode double word.
1559 * @remark Implicitly references pIemCpu.
1560 */
1561#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1562 do \
1563 { \
1564 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1565 if (rcStrict2 != VINF_SUCCESS) \
1566 return rcStrict2; \
1567 } while (0)
1568
1569
1570/**
1571 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1572 *
1573 * @returns Strict VBox status code.
1574 * @param pIemCpu The IEM state.
1575 * @param pu64 Where to return the opcode quad word.
1576 */
1577DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1578{
1579 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1580 if (rcStrict == VINF_SUCCESS)
1581 {
1582 uint8_t offOpcode = pIemCpu->offOpcode;
1583 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1584 pIemCpu->offOpcode = offOpcode + 2;
1585 }
1586 else
1587 *pu64 = 0;
1588 return rcStrict;
1589}
1590
1591
1592/**
1593 * Fetches the next opcode word, zero extending it to a quad word.
1594 *
1595 * @returns Strict VBox status code.
1596 * @param pIemCpu The IEM state.
1597 * @param pu64 Where to return the opcode quad word.
1598 */
1599DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1600{
1601 uint8_t const offOpcode = pIemCpu->offOpcode;
1602 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1603 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1604
1605 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1606 pIemCpu->offOpcode = offOpcode + 2;
1607 return VINF_SUCCESS;
1608}
1609
1610
1611/**
1612 * Fetches the next opcode word and zero extends it to a quad word, returns
1613 * automatically on failure.
1614 *
1615 * @param a_pu64 Where to return the opcode quad word.
1616 * @remark Implicitly references pIemCpu.
1617 */
1618#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1619 do \
1620 { \
1621 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1622 if (rcStrict2 != VINF_SUCCESS) \
1623 return rcStrict2; \
1624 } while (0)
1625
1626
1627/**
1628 * Fetches the next signed word from the opcode stream.
1629 *
1630 * @returns Strict VBox status code.
1631 * @param pIemCpu The IEM state.
1632 * @param pi16 Where to return the signed word.
1633 */
1634DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1635{
1636 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1637}
1638
1639
1640/**
1641 * Fetches the next signed word from the opcode stream, returning automatically
1642 * on failure.
1643 *
1644 * @param a_pi16 Where to return the signed word.
1645 * @remark Implicitly references pIemCpu.
1646 */
1647#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1648 do \
1649 { \
1650 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1651 if (rcStrict2 != VINF_SUCCESS) \
1652 return rcStrict2; \
1653 } while (0)
1654
1655
1656/**
1657 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1658 *
1659 * @returns Strict VBox status code.
1660 * @param pIemCpu The IEM state.
1661 * @param pu32 Where to return the opcode dword.
1662 */
1663DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1664{
1665 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1666 if (rcStrict == VINF_SUCCESS)
1667 {
1668 uint8_t offOpcode = pIemCpu->offOpcode;
1669 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1670 pIemCpu->abOpcode[offOpcode + 1],
1671 pIemCpu->abOpcode[offOpcode + 2],
1672 pIemCpu->abOpcode[offOpcode + 3]);
1673 pIemCpu->offOpcode = offOpcode + 4;
1674 }
1675 else
1676 *pu32 = 0;
1677 return rcStrict;
1678}
1679
1680
1681/**
1682 * Fetches the next opcode dword.
1683 *
1684 * @returns Strict VBox status code.
1685 * @param pIemCpu The IEM state.
1686 * @param pu32 Where to return the opcode double word.
1687 */
1688DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1689{
1690 uint8_t const offOpcode = pIemCpu->offOpcode;
1691 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1692 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1693
1694 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1695 pIemCpu->abOpcode[offOpcode + 1],
1696 pIemCpu->abOpcode[offOpcode + 2],
1697 pIemCpu->abOpcode[offOpcode + 3]);
1698 pIemCpu->offOpcode = offOpcode + 4;
1699 return VINF_SUCCESS;
1700}
1701
1702
1703/**
1704 * Fetches the next opcode dword, returns automatically on failure.
1705 *
1706 * @param a_pu32 Where to return the opcode dword.
1707 * @remark Implicitly references pIemCpu.
1708 */
1709#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1710 do \
1711 { \
1712 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1713 if (rcStrict2 != VINF_SUCCESS) \
1714 return rcStrict2; \
1715 } while (0)
1716
1717
1718/**
1719 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1720 *
1721 * @returns Strict VBox status code.
1722 * @param pIemCpu The IEM state.
1723 * @param pu64 Where to return the opcode dword, zero extended to a quad word.
1724 */
1725DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1726{
1727 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1728 if (rcStrict == VINF_SUCCESS)
1729 {
1730 uint8_t offOpcode = pIemCpu->offOpcode;
1731 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1732 pIemCpu->abOpcode[offOpcode + 1],
1733 pIemCpu->abOpcode[offOpcode + 2],
1734 pIemCpu->abOpcode[offOpcode + 3]);
1735 pIemCpu->offOpcode = offOpcode + 4;
1736 }
1737 else
1738 *pu64 = 0;
1739 return rcStrict;
1740}
1741
1742
1743/**
1744 * Fetches the next opcode dword, zero extending it to a quad word.
1745 *
1746 * @returns Strict VBox status code.
1747 * @param pIemCpu The IEM state.
1748 * @param pu64 Where to return the opcode quad word.
1749 */
1750DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1751{
1752 uint8_t const offOpcode = pIemCpu->offOpcode;
1753 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1754 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1755
1756 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1757 pIemCpu->abOpcode[offOpcode + 1],
1758 pIemCpu->abOpcode[offOpcode + 2],
1759 pIemCpu->abOpcode[offOpcode + 3]);
1760 pIemCpu->offOpcode = offOpcode + 4;
1761 return VINF_SUCCESS;
1762}
1763
1764
1765/**
1766 * Fetches the next opcode dword and zero extends it to a quad word, returns
1767 * automatically on failure.
1768 *
1769 * @param a_pu64 Where to return the opcode quad word.
1770 * @remark Implicitly references pIemCpu.
1771 */
1772#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1773 do \
1774 { \
1775 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1776 if (rcStrict2 != VINF_SUCCESS) \
1777 return rcStrict2; \
1778 } while (0)
1779
1780
1781/**
1782 * Fetches the next signed double word from the opcode stream.
1783 *
1784 * @returns Strict VBox status code.
1785 * @param pIemCpu The IEM state.
1786 * @param pi32 Where to return the signed double word.
1787 */
1788DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1789{
1790 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1791}
1792
1793/**
1794 * Fetches the next signed double word from the opcode stream, returning
1795 * automatically on failure.
1796 *
1797 * @param a_pi32 Where to return the signed double word.
1798 * @remark Implicitly references pIemCpu.
1799 */
1800#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1801 do \
1802 { \
1803 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1804 if (rcStrict2 != VINF_SUCCESS) \
1805 return rcStrict2; \
1806 } while (0)
1807
1808
1809/**
1810 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1811 *
1812 * @returns Strict VBox status code.
1813 * @param pIemCpu The IEM state.
1814 * @param pu64 Where to return the opcode qword.
1815 */
1816DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1817{
1818 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1819 if (rcStrict == VINF_SUCCESS)
1820 {
1821 uint8_t offOpcode = pIemCpu->offOpcode;
1822 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1823 pIemCpu->abOpcode[offOpcode + 1],
1824 pIemCpu->abOpcode[offOpcode + 2],
1825 pIemCpu->abOpcode[offOpcode + 3]);
1826 pIemCpu->offOpcode = offOpcode + 4;
1827 }
1828 else
1829 *pu64 = 0;
1830 return rcStrict;
1831}
1832
1833
1834/**
1835 * Fetches the next opcode dword, sign extending it into a quad word.
1836 *
1837 * @returns Strict VBox status code.
1838 * @param pIemCpu The IEM state.
1839 * @param pu64 Where to return the opcode quad word.
1840 */
1841DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1842{
1843 uint8_t const offOpcode = pIemCpu->offOpcode;
1844 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1845 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1846
1847 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1848 pIemCpu->abOpcode[offOpcode + 1],
1849 pIemCpu->abOpcode[offOpcode + 2],
1850 pIemCpu->abOpcode[offOpcode + 3]);
1851 *pu64 = i32;
1852 pIemCpu->offOpcode = offOpcode + 4;
1853 return VINF_SUCCESS;
1854}
1855
1856
1857/**
1858 * Fetches the next opcode double word and sign extends it to a quad word,
1859 * returns automatically on failure.
1860 *
1861 * @param a_pu64 Where to return the opcode quad word.
1862 * @remark Implicitly references pIemCpu.
1863 */
1864#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1865 do \
1866 { \
1867 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1868 if (rcStrict2 != VINF_SUCCESS) \
1869 return rcStrict2; \
1870 } while (0)
1871
1872
1873/**
1874 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1875 *
1876 * @returns Strict VBox status code.
1877 * @param pIemCpu The IEM state.
1878 * @param pu64 Where to return the opcode qword.
1879 */
1880DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1881{
1882 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1883 if (rcStrict == VINF_SUCCESS)
1884 {
1885 uint8_t offOpcode = pIemCpu->offOpcode;
1886 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1887 pIemCpu->abOpcode[offOpcode + 1],
1888 pIemCpu->abOpcode[offOpcode + 2],
1889 pIemCpu->abOpcode[offOpcode + 3],
1890 pIemCpu->abOpcode[offOpcode + 4],
1891 pIemCpu->abOpcode[offOpcode + 5],
1892 pIemCpu->abOpcode[offOpcode + 6],
1893 pIemCpu->abOpcode[offOpcode + 7]);
1894 pIemCpu->offOpcode = offOpcode + 8;
1895 }
1896 else
1897 *pu64 = 0;
1898 return rcStrict;
1899}
1900
1901
1902/**
1903 * Fetches the next opcode qword.
1904 *
1905 * @returns Strict VBox status code.
1906 * @param pIemCpu The IEM state.
1907 * @param pu64 Where to return the opcode qword.
1908 */
1909DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1910{
1911 uint8_t const offOpcode = pIemCpu->offOpcode;
1912 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1913 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1914
1915 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1916 pIemCpu->abOpcode[offOpcode + 1],
1917 pIemCpu->abOpcode[offOpcode + 2],
1918 pIemCpu->abOpcode[offOpcode + 3],
1919 pIemCpu->abOpcode[offOpcode + 4],
1920 pIemCpu->abOpcode[offOpcode + 5],
1921 pIemCpu->abOpcode[offOpcode + 6],
1922 pIemCpu->abOpcode[offOpcode + 7]);
1923 pIemCpu->offOpcode = offOpcode + 8;
1924 return VINF_SUCCESS;
1925}
1926
1927
1928/**
1929 * Fetches the next opcode quad word, returns automatically on failure.
1930 *
1931 * @param a_pu64 Where to return the opcode quad word.
1932 * @remark Implicitly references pIemCpu.
1933 */
1934#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1935 do \
1936 { \
1937 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1938 if (rcStrict2 != VINF_SUCCESS) \
1939 return rcStrict2; \
1940 } while (0)
1941
1942
1943/** @name Misc Worker Functions.
1944 * @{
1945 */
1946
1947
1948/**
1949 * Validates a new SS segment.
1950 *
1951 * @returns VBox strict status code.
1952 * @param pIemCpu The IEM per CPU instance data.
1953 * @param pCtx The CPU context.
1954 * @param NewSS The new SS selector.
1955 * @param uCpl The CPL to load the stack for.
1956 * @param pDesc Where to return the descriptor.
1957 */
1958IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1959{
1960 NOREF(pCtx);
1961
1962 /* Null selectors are not allowed (we're not called for dispatching
1963 interrupts with SS=0 in long mode). */
1964 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1965 {
1966 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1967 return iemRaiseTaskSwitchFault0(pIemCpu);
1968 }
1969
1970 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1971 if ((NewSS & X86_SEL_RPL) != uCpl)
1972 {
1973 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1974 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1975 }
1976
1977 /*
1978 * Read the descriptor.
1979 */
1980 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1981 if (rcStrict != VINF_SUCCESS)
1982 return rcStrict;
1983
1984 /*
1985 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1986 */
1987 if (!pDesc->Legacy.Gen.u1DescType)
1988 {
1989 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1990 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1991 }
1992
1993 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1994 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1995 {
1996 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1997 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1998 }
1999 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2000 {
2001 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2002 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2003 }
2004
2005 /* Is it there? */
2006 /** @todo testcase: Is this checked before the canonical / limit check below? */
2007 if (!pDesc->Legacy.Gen.u1Present)
2008 {
2009 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2010 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2011 }
2012
2013 return VINF_SUCCESS;
2014}
2015
2016
2017/**
2018 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2019 * not.
2020 *
2021 * @param a_pIemCpu The IEM per CPU data.
2022 * @param a_pCtx The CPU context.
2023 */
2024#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2025# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2026 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2027 ? (a_pCtx)->eflags.u \
2028 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2029#else
2030# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2031 ( (a_pCtx)->eflags.u )
2032#endif
2033
2034/**
2035 * Updates the EFLAGS in the correct manner wrt. PATM.
2036 *
2037 * @param a_pIemCpu The IEM per CPU data.
2038 * @param a_pCtx The CPU context.
 * @param a_fEfl The new EFLAGS value.
2039 */
2040#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2041# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2042 do { \
2043 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2044 (a_pCtx)->eflags.u = (a_fEfl); \
2045 else \
2046 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2047 } while (0)
2048#else
2049# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2050 do { \
2051 (a_pCtx)->eflags.u = (a_fEfl); \
2052 } while (0)
2053#endif
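
/*
 * Usage sketch (illustrative): code that modifies the guest EFLAGS reads and
 * writes through these macros so that PATM-managed bits stay consistent in
 * raw-mode, e.g.:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;                        // example: mask interrupts
 *     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */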
2054
2055
2056/** @} */
2057
2058/** @name Raising Exceptions.
2059 *
2060 * @{
2061 */
2062
2063/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2064 * @{ */
2065/** CPU exception. */
2066#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2067/** External interrupt (from PIC, APIC, whatever). */
2068#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2069/** Software interrupt (int or into, not bound).
2070 * Returns to the following instruction. */
2071#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2072/** Takes an error code. */
2073#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2074/** Takes a CR2. */
2075#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2076/** Generated by the breakpoint instruction. */
2077#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2078/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2079#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2080/** @} */
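
/*
 * Example combinations (illustrative): a CPU page fault would typically be
 * raised with IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
 * (both an error code and a CR2 value are supplied), whereas an INT n
 * instruction uses IEM_XCPT_FLAGS_T_SOFT_INT so the pushed return address
 * points at the instruction following the INT.
 */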
2081
2082
2083/**
2084 * Loads the specified stack far pointer from the TSS.
2085 *
2086 * @returns VBox strict status code.
2087 * @param pIemCpu The IEM per CPU instance data.
2088 * @param pCtx The CPU context.
2089 * @param uCpl The CPL to load the stack for.
2090 * @param pSelSS Where to return the new stack segment.
2091 * @param puEsp Where to return the new stack pointer.
2092 */
2093IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2094 PRTSEL pSelSS, uint32_t *puEsp)
2095{
2096 VBOXSTRICTRC rcStrict;
2097 Assert(uCpl < 4);
2098 *puEsp = 0; /* make gcc happy */
2099 *pSelSS = 0; /* make gcc happy */
2100
2101 switch (pCtx->tr.Attr.n.u4Type)
2102 {
2103 /*
2104 * 16-bit TSS (X86TSS16).
2105 */
2106 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2107 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2108 {
2109 uint32_t off = uCpl * 4 + 2;
2110 if (off + 4 > pCtx->tr.u32Limit)
2111 {
2112 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2113 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2114 }
2115
2116 uint32_t u32Tmp = 0; /* gcc maybe... */
2117 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2118 if (rcStrict == VINF_SUCCESS)
2119 {
2120 *puEsp = RT_LOWORD(u32Tmp);
2121 *pSelSS = RT_HIWORD(u32Tmp);
2122 return VINF_SUCCESS;
2123 }
2124 break;
2125 }
2126
2127 /*
2128 * 32-bit TSS (X86TSS32).
2129 */
2130 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2131 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2132 {
2133 uint32_t off = uCpl * 8 + 4;
2134 if (off + 7 > pCtx->tr.u32Limit)
2135 {
2136                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2137 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2138 }
2139
2140 uint64_t u64Tmp;
2141 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2142 if (rcStrict == VINF_SUCCESS)
2143 {
2144 *puEsp = u64Tmp & UINT32_MAX;
2145 *pSelSS = (RTSEL)(u64Tmp >> 32);
2146 return VINF_SUCCESS;
2147 }
2148 break;
2149 }
2150
2151 default:
2152 AssertFailedReturn(VERR_IEM_IPE_4);
2153 }
2154 return rcStrict;
2155}
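
/*
 * Worked example (for clarity): with a 32-bit TSS and uCpl=1 the {esp1,ss1}
 * pair is read as an 8-byte unit at offset 1*8 + 4 = 0x0c; with a 16-bit TSS
 * and uCpl=2 the {sp2,ss2} pair is read as a 4-byte unit at offset
 * 2*4 + 2 = 0x0a.  Either read must fit within tr.u32Limit or a #TS is raised.
 */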
2156
2157
2158/**
2159 * Loads the specified stack pointer from the 64-bit TSS.
2160 *
2161 * @returns VBox strict status code.
2162 * @param pIemCpu The IEM per CPU instance data.
2163 * @param pCtx The CPU context.
2164 * @param uCpl The CPL to load the stack for.
2165 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2166 * @param puRsp Where to return the new stack pointer.
2167 */
2168IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2169{
2170 Assert(uCpl < 4);
2171 Assert(uIst < 8);
2172 *puRsp = 0; /* make gcc happy */
2173
2174 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2175
2176 uint32_t off;
2177 if (uIst)
2178 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2179 else
2180 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2181 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2182 {
2183 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2184 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2185 }
2186
2187 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2188}
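
/*
 * Worked example (for clarity): in the 64-bit TSS rsp0 lives at offset 0x04
 * and ist1 at offset 0x24, so uCpl=0/uIst=0 reads the 8-byte rsp0 at 0x04,
 * while uIst=3 reads IST3 at (3-1)*8 + 0x24 = 0x34, again subject to the
 * tr.u32Limit bounds check above.
 */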
2189
2190
2191/**
2192 * Adjusts the CPU state according to the exception being raised.
2193 *
2194 * @param pCtx The CPU context.
2195 * @param u8Vector The exception that has been raised.
2196 */
2197DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2198{
2199 switch (u8Vector)
2200 {
2201 case X86_XCPT_DB:
2202 pCtx->dr[7] &= ~X86_DR7_GD;
2203 break;
2204 /** @todo Read the AMD and Intel exception reference... */
2205 }
2206}
2207
2208
2209/**
2210 * Implements exceptions and interrupts for real mode.
2211 *
2212 * @returns VBox strict status code.
2213 * @param pIemCpu The IEM per CPU instance data.
2214 * @param pCtx The CPU context.
2215 * @param cbInstr The number of bytes to offset rIP by in the return
2216 * address.
2217 * @param u8Vector The interrupt / exception vector number.
2218 * @param fFlags The flags.
2219 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2220 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2221 */
2222IEM_STATIC VBOXSTRICTRC
2223iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2224 PCPUMCTX pCtx,
2225 uint8_t cbInstr,
2226 uint8_t u8Vector,
2227 uint32_t fFlags,
2228 uint16_t uErr,
2229 uint64_t uCr2)
2230{
2231 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2232 NOREF(uErr); NOREF(uCr2);
2233
2234 /*
2235 * Read the IDT entry.
2236 */
2237 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2238 {
2239 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2240 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2241 }
2242 RTFAR16 Idte;
2243 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2244 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2245 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2246 return rcStrict;
2247
2248 /*
2249 * Push the stack frame.
2250 */
2251 uint16_t *pu16Frame;
2252 uint64_t uNewRsp;
2253 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2254 if (rcStrict != VINF_SUCCESS)
2255 return rcStrict;
2256
2257 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2258 pu16Frame[2] = (uint16_t)fEfl;
2259 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2260 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2261 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2262 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2263 return rcStrict;
2264
2265 /*
2266 * Load the vector address into cs:ip and make exception specific state
2267 * adjustments.
2268 */
2269 pCtx->cs.Sel = Idte.sel;
2270 pCtx->cs.ValidSel = Idte.sel;
2271 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2272 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2273 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2274 pCtx->rip = Idte.off;
2275 fEfl &= ~X86_EFL_IF;
2276 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2277
2278 /** @todo do we actually do this in real mode? */
2279 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2280 iemRaiseXcptAdjustState(pCtx, u8Vector);
2281
2282 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2283}
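
/*
 * Worked example (illustrative values): in real mode each IVT entry is four
 * bytes (offset:segment), so vector 0x08 is fetched from idtr.pIdt + 0x20.
 * If that entry held, say, f000:fea5, execution would continue at CS=0xf000
 * (base 0xf0000), IP=0xfea5, with the old FLAGS, CS and IP pushed as a
 * 6-byte frame and IF cleared.
 */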
2284
2285
2286/**
2287 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2288 *
2289 * @param pIemCpu The IEM per CPU instance data.
2290 * @param pSReg Pointer to the segment register.
2291 */
2292IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2293{
2294 pSReg->Sel = 0;
2295 pSReg->ValidSel = 0;
2296 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2297 {
2298 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2299 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2300 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2301 }
2302 else
2303 {
2304 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2305 /** @todo check this on AMD-V */
2306 pSReg->u64Base = 0;
2307 pSReg->u32Limit = 0;
2308 }
2309}
2310
2311
2312/**
2313 * Loads a segment selector during a task switch in V8086 mode.
2314 *
2315 * @param pIemCpu The IEM per CPU instance data.
2316 * @param pSReg Pointer to the segment register.
2317 * @param uSel The selector value to load.
2318 */
2319IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2320{
2321 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2322 pSReg->Sel = uSel;
2323 pSReg->ValidSel = uSel;
2324 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2325 pSReg->u64Base = uSel << 4;
2326 pSReg->u32Limit = 0xffff;
2327 pSReg->Attr.u = 0xf3;
2328}
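
/*
 * Worked example (for clarity): loading uSel=0x1234 in V8086 mode yields
 * u64Base = 0x1234 << 4 = 0x12340, u32Limit = 0xffff and Attr.u = 0xf3
 * (present, DPL=3, accessed read/write data segment), i.e. the hidden parts
 * are derived directly from the selector, as real hardware does in this mode.
 */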
2329
2330
2331/**
2332 * Loads a NULL data selector into a selector register, both the hidden and
2333 * visible parts, in protected mode.
2334 *
2335 * @param pIemCpu The IEM state of the calling EMT.
2336 * @param pSReg Pointer to the segment register.
2337 * @param uRpl The RPL.
2338 */
2339IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2340{
2341    /** @todo Testcase: write a testcase checking what happens when loading a NULL
2342 * data selector in protected mode. */
2343 pSReg->Sel = uRpl;
2344 pSReg->ValidSel = uRpl;
2345 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2346 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2347 {
2348 /* VT-x (Intel 3960x) observed doing something like this. */
2349 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2350 pSReg->u32Limit = UINT32_MAX;
2351 pSReg->u64Base = 0;
2352 }
2353 else
2354 {
2355 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2356 pSReg->u32Limit = 0;
2357 pSReg->u64Base = 0;
2358 }
2359}
2360
2361
2362/**
2363 * Loads a segment selector during a task switch in protected mode. In this task
2364 * switch scenario, #TS exceptions are raised rather than #GPs.
2365 *
2366 * @returns VBox strict status code.
2367 * @param pIemCpu The IEM per CPU instance data.
2368 * @param pSReg Pointer to the segment register.
2369 * @param uSel The new selector value.
2370 *
2371 * @remarks This does -NOT- handle CS or SS.
2372 * @remarks This expects pIemCpu->uCpl to be up to date.
2373 */
2374IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2375{
2376 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2377
2378 /* Null data selector. */
2379 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2380 {
2381 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2382 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2383 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2384 return VINF_SUCCESS;
2385 }
2386
2387 /* Fetch the descriptor. */
2388 IEMSELDESC Desc;
2389 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2390 if (rcStrict != VINF_SUCCESS)
2391 {
2392 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2393 VBOXSTRICTRC_VAL(rcStrict)));
2394 return rcStrict;
2395 }
2396
2397 /* Must be a data segment or readable code segment. */
2398 if ( !Desc.Legacy.Gen.u1DescType
2399 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2400 {
2401 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2402 Desc.Legacy.Gen.u4Type));
2403 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2404 }
2405
2406 /* Check privileges for data segments and non-conforming code segments. */
2407 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2408 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2409 {
2410 /* The RPL and the new CPL must be less than or equal to the DPL. */
2411 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2412 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2413 {
2414 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2415 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2416 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2417 }
2418 }
2419
2420 /* Is it there? */
2421 if (!Desc.Legacy.Gen.u1Present)
2422 {
2423 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2424 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2425 }
2426
2427 /* The base and limit. */
2428 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2429 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2430
2431 /*
2432 * Ok, everything checked out fine. Now set the accessed bit before
2433 * committing the result into the registers.
2434 */
2435 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2436 {
2437 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2438 if (rcStrict != VINF_SUCCESS)
2439 return rcStrict;
2440 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2441 }
2442
2443 /* Commit */
2444 pSReg->Sel = uSel;
2445 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2446 pSReg->u32Limit = cbLimit;
2447 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2448 pSReg->ValidSel = uSel;
2449 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2450 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2451 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2452
2453 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2454 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2455 return VINF_SUCCESS;
2456}
2457
2458
2459/**
2460 * Performs a task switch.
2461 *
2462 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2463 * caller is responsible for performing the necessary checks (like DPL, TSS
2464 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2465 * reference for JMP, CALL, IRET.
2466 *
2467 * If the task switch is due to a software interrupt or hardware exception,
2468 * the caller is responsible for validating the TSS selector and descriptor. See
2469 * Intel Instruction reference for INT n.
2470 *
2471 * @returns VBox strict status code.
2472 * @param pIemCpu The IEM per CPU instance data.
2473 * @param pCtx The CPU context.
2474 * @param enmTaskSwitch What caused this task switch.
2475 * @param uNextEip The EIP effective after the task switch.
2476 * @param fFlags The flags.
2477 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2478 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2479 * @param SelTSS The TSS selector of the new task.
2480 * @param pNewDescTSS Pointer to the new TSS descriptor.
2481 */
2482IEM_STATIC VBOXSTRICTRC
2483iemTaskSwitch(PIEMCPU pIemCpu,
2484 PCPUMCTX pCtx,
2485 IEMTASKSWITCH enmTaskSwitch,
2486 uint32_t uNextEip,
2487 uint32_t fFlags,
2488 uint16_t uErr,
2489 uint64_t uCr2,
2490 RTSEL SelTSS,
2491 PIEMSELDESC pNewDescTSS)
2492{
2493 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2494 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2495
2496 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2497 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2498 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2499 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2500 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2501
2502 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2503 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2504
2505 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2506 fIsNewTSS386, pCtx->eip, uNextEip));
2507
2508 /* Update CR2 in case it's a page-fault. */
2509 /** @todo This should probably be done much earlier in IEM/PGM. See
2510 * @bugref{5653#c49}. */
2511 if (fFlags & IEM_XCPT_FLAGS_CR2)
2512 pCtx->cr2 = uCr2;
2513
2514 /*
2515 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2516 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2517 */
2518 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2519 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2520 if (uNewTSSLimit < uNewTSSLimitMin)
2521 {
2522 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2523 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2524 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2525 }
2526
2527 /*
2528 * Check the current TSS limit. The last written byte to the current TSS during the
2529 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2530 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2531 *
2532 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2533 * end up with smaller than "legal" TSS limits.
2534 */
2535 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2536 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2537 if (uCurTSSLimit < uCurTSSLimitMin)
2538 {
2539 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2540 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2541 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2542 }
2543
2544 /*
2545 * Verify that the new TSS can be accessed and map it. Map only the required contents
2546 * and not the entire TSS.
2547 */
2548 void *pvNewTSS;
2549 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2550 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2551 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2552 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2553 * not perform correct translation if this happens. See Intel spec. 7.2.1
2554 * "Task-State Segment" */
2555 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2556 if (rcStrict != VINF_SUCCESS)
2557 {
2558 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2559 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2560 return rcStrict;
2561 }
2562
2563 /*
2564 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2565 */
2566 uint32_t u32EFlags = pCtx->eflags.u32;
2567 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2568 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2569 {
2570 PX86DESC pDescCurTSS;
2571 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2572 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2573 if (rcStrict != VINF_SUCCESS)
2574 {
2575 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2576 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2577 return rcStrict;
2578 }
2579
2580 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2581 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2582 if (rcStrict != VINF_SUCCESS)
2583 {
2584 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2585 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2586 return rcStrict;
2587 }
2588
2589 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2590 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2591 {
2592 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2593 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2594 u32EFlags &= ~X86_EFL_NT;
2595 }
2596 }
2597
2598 /*
2599 * Save the CPU state into the current TSS.
2600 */
2601 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2602 if (GCPtrNewTSS == GCPtrCurTSS)
2603 {
2604 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2605 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2606 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2607 }
2608 if (fIsNewTSS386)
2609 {
2610 /*
2611 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2612 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2613 */
2614 void *pvCurTSS32;
2615 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2616 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2617 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2618 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2619 if (rcStrict != VINF_SUCCESS)
2620 {
2621 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2622 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2623 return rcStrict;
2624 }
2625
2626        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2627 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2628 pCurTSS32->eip = uNextEip;
2629 pCurTSS32->eflags = u32EFlags;
2630 pCurTSS32->eax = pCtx->eax;
2631 pCurTSS32->ecx = pCtx->ecx;
2632 pCurTSS32->edx = pCtx->edx;
2633 pCurTSS32->ebx = pCtx->ebx;
2634 pCurTSS32->esp = pCtx->esp;
2635 pCurTSS32->ebp = pCtx->ebp;
2636 pCurTSS32->esi = pCtx->esi;
2637 pCurTSS32->edi = pCtx->edi;
2638 pCurTSS32->es = pCtx->es.Sel;
2639 pCurTSS32->cs = pCtx->cs.Sel;
2640 pCurTSS32->ss = pCtx->ss.Sel;
2641 pCurTSS32->ds = pCtx->ds.Sel;
2642 pCurTSS32->fs = pCtx->fs.Sel;
2643 pCurTSS32->gs = pCtx->gs.Sel;
2644
2645 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2646 if (rcStrict != VINF_SUCCESS)
2647 {
2648 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2649 VBOXSTRICTRC_VAL(rcStrict)));
2650 return rcStrict;
2651 }
2652 }
2653 else
2654 {
2655 /*
2656 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2657 */
2658 void *pvCurTSS16;
2659 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2660 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2661 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2662 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2663 if (rcStrict != VINF_SUCCESS)
2664 {
2665 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2666 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2667 return rcStrict;
2668 }
2669
2670        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2671 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2672 pCurTSS16->ip = uNextEip;
2673 pCurTSS16->flags = u32EFlags;
2674 pCurTSS16->ax = pCtx->ax;
2675 pCurTSS16->cx = pCtx->cx;
2676 pCurTSS16->dx = pCtx->dx;
2677 pCurTSS16->bx = pCtx->bx;
2678 pCurTSS16->sp = pCtx->sp;
2679 pCurTSS16->bp = pCtx->bp;
2680 pCurTSS16->si = pCtx->si;
2681 pCurTSS16->di = pCtx->di;
2682 pCurTSS16->es = pCtx->es.Sel;
2683 pCurTSS16->cs = pCtx->cs.Sel;
2684 pCurTSS16->ss = pCtx->ss.Sel;
2685 pCurTSS16->ds = pCtx->ds.Sel;
2686
2687 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2688 if (rcStrict != VINF_SUCCESS)
2689 {
2690 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2691 VBOXSTRICTRC_VAL(rcStrict)));
2692 return rcStrict;
2693 }
2694 }
2695
2696 /*
2697 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2698 */
2699 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2700 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2701 {
2702 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2703 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2704 pNewTSS->selPrev = pCtx->tr.Sel;
2705 }
2706
2707 /*
2708 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2709 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2710 */
2711 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2712 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2713 bool fNewDebugTrap;
2714 if (fIsNewTSS386)
2715 {
2716 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2717 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2718 uNewEip = pNewTSS32->eip;
2719 uNewEflags = pNewTSS32->eflags;
2720 uNewEax = pNewTSS32->eax;
2721 uNewEcx = pNewTSS32->ecx;
2722 uNewEdx = pNewTSS32->edx;
2723 uNewEbx = pNewTSS32->ebx;
2724 uNewEsp = pNewTSS32->esp;
2725 uNewEbp = pNewTSS32->ebp;
2726 uNewEsi = pNewTSS32->esi;
2727 uNewEdi = pNewTSS32->edi;
2728 uNewES = pNewTSS32->es;
2729 uNewCS = pNewTSS32->cs;
2730 uNewSS = pNewTSS32->ss;
2731 uNewDS = pNewTSS32->ds;
2732 uNewFS = pNewTSS32->fs;
2733 uNewGS = pNewTSS32->gs;
2734 uNewLdt = pNewTSS32->selLdt;
2735 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2736 }
2737 else
2738 {
2739 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2740 uNewCr3 = 0;
2741 uNewEip = pNewTSS16->ip;
2742 uNewEflags = pNewTSS16->flags;
2743 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2744 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2745 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2746 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2747 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2748 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2749 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2750 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2751 uNewES = pNewTSS16->es;
2752 uNewCS = pNewTSS16->cs;
2753 uNewSS = pNewTSS16->ss;
2754 uNewDS = pNewTSS16->ds;
2755 uNewFS = 0;
2756 uNewGS = 0;
2757 uNewLdt = pNewTSS16->selLdt;
2758 fNewDebugTrap = false;
2759 }
2760
2761 if (GCPtrNewTSS == GCPtrCurTSS)
2762 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2763 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2764
2765 /*
2766 * We're done accessing the new TSS.
2767 */
2768 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2769 if (rcStrict != VINF_SUCCESS)
2770 {
2771 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2772 return rcStrict;
2773 }
2774
2775 /*
2776 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2777 */
2778 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2779 {
2780 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2781 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2782 if (rcStrict != VINF_SUCCESS)
2783 {
2784 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2785 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2786 return rcStrict;
2787 }
2788
2789 /* Check that the descriptor indicates the new TSS is available (not busy). */
2790 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2791 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2792 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2793
2794 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2795 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2796 if (rcStrict != VINF_SUCCESS)
2797 {
2798 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2799 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2800 return rcStrict;
2801 }
2802 }
2803
2804 /*
2805     * From this point on, we're technically in the new task.  Exceptions raised during
2806     * the rest of the task switch are taken in the context of the new task, before any of its instructions execute.
2807 */
2808 pCtx->tr.Sel = SelTSS;
2809 pCtx->tr.ValidSel = SelTSS;
2810 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2811 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2812 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2813 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2814 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2815
2816 /* Set the busy bit in TR. */
2817 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2818 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2819 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2820 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2821 {
2822 uNewEflags |= X86_EFL_NT;
2823 }
2824
2825 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2826 pCtx->cr0 |= X86_CR0_TS;
2827 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2828
2829 pCtx->eip = uNewEip;
2830 pCtx->eax = uNewEax;
2831 pCtx->ecx = uNewEcx;
2832 pCtx->edx = uNewEdx;
2833 pCtx->ebx = uNewEbx;
2834 pCtx->esp = uNewEsp;
2835 pCtx->ebp = uNewEbp;
2836 pCtx->esi = uNewEsi;
2837 pCtx->edi = uNewEdi;
2838
2839 uNewEflags &= X86_EFL_LIVE_MASK;
2840 uNewEflags |= X86_EFL_RA1_MASK;
2841 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2842
2843 /*
2844 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2845 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2846 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2847 */
2848 pCtx->es.Sel = uNewES;
2849 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2850 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2851
2852 pCtx->cs.Sel = uNewCS;
2853 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2854 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2855
2856 pCtx->ss.Sel = uNewSS;
2857 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2858 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2859
2860 pCtx->ds.Sel = uNewDS;
2861 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2862 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2863
2864 pCtx->fs.Sel = uNewFS;
2865 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2866 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2867
2868 pCtx->gs.Sel = uNewGS;
2869 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2870 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2871 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2872
2873 pCtx->ldtr.Sel = uNewLdt;
2874 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2875 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2876 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2877
2878 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2879 {
2880 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2881 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2882 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2883 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2884 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2885 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2886 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2887 }
2888
2889 /*
2890 * Switch CR3 for the new task.
2891 */
2892 if ( fIsNewTSS386
2893 && (pCtx->cr0 & X86_CR0_PG))
2894 {
2895 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2896 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2897 {
2898 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2899 AssertRCSuccessReturn(rc, rc);
2900 }
2901 else
2902 pCtx->cr3 = uNewCr3;
2903
2904 /* Inform PGM. */
2905 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2906 {
2907 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2908 AssertRCReturn(rc, rc);
2909 /* ignore informational status codes */
2910 }
2911 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2912 }
2913
2914 /*
2915 * Switch LDTR for the new task.
2916 */
2917 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2918 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2919 else
2920 {
2921 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2922
2923 IEMSELDESC DescNewLdt;
2924 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2925 if (rcStrict != VINF_SUCCESS)
2926 {
2927 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2928 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2929 return rcStrict;
2930 }
2931 if ( !DescNewLdt.Legacy.Gen.u1Present
2932 || DescNewLdt.Legacy.Gen.u1DescType
2933 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2934 {
2935 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2936 uNewLdt, DescNewLdt.Legacy.u));
2937 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2938 }
2939
2940 pCtx->ldtr.ValidSel = uNewLdt;
2941 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2942 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2943 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2944 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2945 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2946 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2947 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2948 }
2949
2950 IEMSELDESC DescSS;
2951 if (IEM_IS_V86_MODE(pIemCpu))
2952 {
2953 pIemCpu->uCpl = 3;
2954 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2955 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2956 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2957 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2958 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2959 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2960 }
2961 else
2962 {
2963 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2964
2965 /*
2966 * Load the stack segment for the new task.
2967 */
2968 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2969 {
2970 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2971 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2972 }
2973
2974 /* Fetch the descriptor. */
2975 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2976 if (rcStrict != VINF_SUCCESS)
2977 {
2978 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2979 VBOXSTRICTRC_VAL(rcStrict)));
2980 return rcStrict;
2981 }
2982
2983 /* SS must be a data segment and writable. */
2984 if ( !DescSS.Legacy.Gen.u1DescType
2985 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2986 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2987 {
2988 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2989 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2990 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2991 }
2992
2993 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2994 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2995 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2996 {
2997 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2998 uNewCpl));
2999 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3000 }
3001
3002 /* Is it there? */
3003 if (!DescSS.Legacy.Gen.u1Present)
3004 {
3005 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3006 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3007 }
3008
3009 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3010 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3011
3012 /* Set the accessed bit before committing the result into SS. */
3013 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3014 {
3015 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3016 if (rcStrict != VINF_SUCCESS)
3017 return rcStrict;
3018 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3019 }
3020
3021 /* Commit SS. */
3022 pCtx->ss.Sel = uNewSS;
3023 pCtx->ss.ValidSel = uNewSS;
3024 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3025 pCtx->ss.u32Limit = cbLimit;
3026 pCtx->ss.u64Base = u64Base;
3027 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3028 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3029
3030 /* CPL has changed, update IEM before loading rest of segments. */
3031 pIemCpu->uCpl = uNewCpl;
3032
3033 /*
3034 * Load the data segments for the new task.
3035 */
3036 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3037 if (rcStrict != VINF_SUCCESS)
3038 return rcStrict;
3039 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3040 if (rcStrict != VINF_SUCCESS)
3041 return rcStrict;
3042 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3043 if (rcStrict != VINF_SUCCESS)
3044 return rcStrict;
3045 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3046 if (rcStrict != VINF_SUCCESS)
3047 return rcStrict;
3048
3049 /*
3050 * Load the code segment for the new task.
3051 */
3052 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3053 {
3054 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3055 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3056 }
3057
3058 /* Fetch the descriptor. */
3059 IEMSELDESC DescCS;
3060 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3061 if (rcStrict != VINF_SUCCESS)
3062 {
3063 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3064 return rcStrict;
3065 }
3066
3067 /* CS must be a code segment. */
3068 if ( !DescCS.Legacy.Gen.u1DescType
3069 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3070 {
3071 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3072 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3073 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3074 }
3075
3076 /* For conforming CS, DPL must be less than or equal to the RPL. */
3077 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3078 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3079 {
3080            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3081 DescCS.Legacy.Gen.u2Dpl));
3082 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3083 }
3084
3085 /* For non-conforming CS, DPL must match RPL. */
3086 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3087 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3088 {
3089            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3090 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3091 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3092 }
3093
3094 /* Is it there? */
3095 if (!DescCS.Legacy.Gen.u1Present)
3096 {
3097 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3098 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3099 }
3100
3101 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3102 u64Base = X86DESC_BASE(&DescCS.Legacy);
3103
3104 /* Set the accessed bit before committing the result into CS. */
3105 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3106 {
3107 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3108 if (rcStrict != VINF_SUCCESS)
3109 return rcStrict;
3110 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3111 }
3112
3113 /* Commit CS. */
3114 pCtx->cs.Sel = uNewCS;
3115 pCtx->cs.ValidSel = uNewCS;
3116 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3117 pCtx->cs.u32Limit = cbLimit;
3118 pCtx->cs.u64Base = u64Base;
3119 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3121 }
3122
3123 /** @todo Debug trap. */
3124 if (fIsNewTSS386 && fNewDebugTrap)
3125 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3126
3127 /*
3128 * Construct the error code masks based on what caused this task switch.
3129 * See Intel Instruction reference for INT.
3130 */
3131 uint16_t uExt;
3132 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3133 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3134 {
3135 uExt = 1;
3136 }
3137 else
3138 uExt = 0;
3139
3140 /*
3141 * Push any error code on to the new stack.
3142 */
3143 if (fFlags & IEM_XCPT_FLAGS_ERR)
3144 {
3145 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3146 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3147 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3148
3149 /* Check that there is sufficient space on the stack. */
3150 /** @todo Factor out segment limit checking for normal/expand down segments
3151 * into a separate function. */
3152 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3153 {
3154 if ( pCtx->esp - 1 > cbLimitSS
3155 || pCtx->esp < cbStackFrame)
3156 {
3157 /** @todo Intel says #SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3158 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3159 cbStackFrame));
3160 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3161 }
3162 }
3163 else
3164 {
3165 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3166 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3167 {
3168 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3169 cbStackFrame));
3170 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3171 }
3172 }
3173
3174
3175 if (fIsNewTSS386)
3176 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3177 else
3178 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3179 if (rcStrict != VINF_SUCCESS)
3180 {
3181 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3182 VBOXSTRICTRC_VAL(rcStrict)));
3183 return rcStrict;
3184 }
3185 }
3186
3187 /* Check the new EIP against the new CS limit. */
3188 if (pCtx->eip > pCtx->cs.u32Limit)
3189 {
3190 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3191 pCtx->eip, pCtx->cs.u32Limit));
3192 /** @todo Intel says #GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3193 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3194 }
3195
3196 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3197 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3198}
3199
3200
3201/**
3202 * Implements exceptions and interrupts for protected mode.
3203 *
3204 * @returns VBox strict status code.
3205 * @param pIemCpu The IEM per CPU instance data.
3206 * @param pCtx The CPU context.
3207 * @param cbInstr The number of bytes to offset rIP by in the return
3208 * address.
3209 * @param u8Vector The interrupt / exception vector number.
3210 * @param fFlags The flags.
3211 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3212 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3213 */
3214IEM_STATIC VBOXSTRICTRC
3215iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3216 PCPUMCTX pCtx,
3217 uint8_t cbInstr,
3218 uint8_t u8Vector,
3219 uint32_t fFlags,
3220 uint16_t uErr,
3221 uint64_t uCr2)
3222{
3223 /*
3224 * Read the IDT entry.
3225 */
3226 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3227 {
3228 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3229 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3230 }
3231 X86DESC Idte;
3232 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3233 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3234 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3235 return rcStrict;
3236 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3237 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3238 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3239
3240 /*
3241 * Check the descriptor type, DPL and such.
3242 * ASSUMES this is done in the same order as described for call-gate calls.
3243 */
3244 if (Idte.Gate.u1DescType)
3245 {
3246 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3247 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3248 }
3249 bool fTaskGate = false;
3250 uint8_t f32BitGate = true;
3251 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3252 switch (Idte.Gate.u4Type)
3253 {
3254 case X86_SEL_TYPE_SYS_UNDEFINED:
3255 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3256 case X86_SEL_TYPE_SYS_LDT:
3257 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3258 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3259 case X86_SEL_TYPE_SYS_UNDEFINED2:
3260 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3261 case X86_SEL_TYPE_SYS_UNDEFINED3:
3262 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3263 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3264 case X86_SEL_TYPE_SYS_UNDEFINED4:
3265 {
3266 /** @todo check what actually happens when the type is wrong...
3267 * esp. call gates. */
3268 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3269 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3270 }
3271
3272 case X86_SEL_TYPE_SYS_286_INT_GATE:
3273 f32BitGate = false; /* fall thru */
3274 case X86_SEL_TYPE_SYS_386_INT_GATE:
3275 fEflToClear |= X86_EFL_IF;
3276 break;
3277
3278 case X86_SEL_TYPE_SYS_TASK_GATE:
3279 fTaskGate = true;
3280#ifndef IEM_IMPLEMENTS_TASKSWITCH
3281 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3282#endif
3283 break;
3284
3285 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3286 f32BitGate = false; /* fall thru */
3287 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3288 break;
3289
3290 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3291 }
3292
3293 /* Check DPL against CPL if applicable. */
3294 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3295 {
3296 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3297 {
3298 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3299 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3300 }
3301 }
3302
3303 /* Is it there? */
3304 if (!Idte.Gate.u1Present)
3305 {
3306 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3307 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3308 }
3309
3310 /* Is it a task-gate? */
3311 if (fTaskGate)
3312 {
3313 /*
3314 * Construct the error code masks based on what caused this task switch.
3315 * See Intel Instruction reference for INT.
3316 */
3317 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3318 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3319 RTSEL SelTSS = Idte.Gate.u16Sel;
3320
3321 /*
3322 * Fetch the TSS descriptor in the GDT.
3323 */
3324 IEMSELDESC DescTSS;
3325 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3326 if (rcStrict != VINF_SUCCESS)
3327 {
3328 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3329 VBOXSTRICTRC_VAL(rcStrict)));
3330 return rcStrict;
3331 }
3332
3333 /* The TSS descriptor must be a system segment and be available (not busy). */
3334 if ( DescTSS.Legacy.Gen.u1DescType
3335 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3336 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3337 {
3338 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3339 u8Vector, SelTSS, DescTSS.Legacy.au64));
3340 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3341 }
3342
3343 /* The TSS must be present. */
3344 if (!DescTSS.Legacy.Gen.u1Present)
3345 {
3346 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3347 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3348 }
3349
3350 /* Do the actual task switch. */
3351 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3352 }
3353
3354 /* A null CS is bad. */
3355 RTSEL NewCS = Idte.Gate.u16Sel;
3356 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3357 {
3358 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3359 return iemRaiseGeneralProtectionFault0(pIemCpu);
3360 }
3361
3362 /* Fetch the descriptor for the new CS. */
3363 IEMSELDESC DescCS;
3364 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3365 if (rcStrict != VINF_SUCCESS)
3366 {
3367 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3368 return rcStrict;
3369 }
3370
3371 /* Must be a code segment. */
3372 if (!DescCS.Legacy.Gen.u1DescType)
3373 {
3374 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3375 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3376 }
3377 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3378 {
3379 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3380 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3381 }
3382
3383 /* Don't allow lowering the privilege level. */
3384 /** @todo Does the lowering of privileges apply to software interrupts
3385 * only? This has bearings on the more-privileged or
3386 * same-privilege stack behavior further down. A testcase would
3387 * be nice. */
3388 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3389 {
3390 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3391 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3392 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3393 }
3394
3395 /* Make sure the selector is present. */
3396 if (!DescCS.Legacy.Gen.u1Present)
3397 {
3398 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3399 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3400 }
3401
3402 /* Check the new EIP against the new CS limit. */
3403 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3404 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3405 ? Idte.Gate.u16OffsetLow
3406 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3407 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3408 if (uNewEip > cbLimitCS)
3409 {
3410 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3411 u8Vector, uNewEip, cbLimitCS, NewCS));
3412 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3413 }
3414
3415 /* Calc the flag image to push. */
3416 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3417 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3418 fEfl &= ~X86_EFL_RF;
3419 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3420 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3421
3422 /* From V8086 mode only go to CPL 0. */
3423 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3424 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3425 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3426 {
3427 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3428 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3429 }
3430
3431 /*
3432 * If the privilege level changes, we need to get a new stack from the TSS.
3433 * This in turns means validating the new SS and ESP...
3434 */
3435 if (uNewCpl != pIemCpu->uCpl)
3436 {
3437 RTSEL NewSS;
3438 uint32_t uNewEsp;
3439 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3440 if (rcStrict != VINF_SUCCESS)
3441 return rcStrict;
3442
3443 IEMSELDESC DescSS;
3444 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3445 if (rcStrict != VINF_SUCCESS)
3446 return rcStrict;
3447
3448 /* Check that there is sufficient space for the stack frame. */
3449 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3450 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3451 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3452 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3453
3454 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3455 {
3456 if ( uNewEsp - 1 > cbLimitSS
3457 || uNewEsp < cbStackFrame)
3458 {
3459 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3460 u8Vector, NewSS, uNewEsp, cbStackFrame));
3461 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3462 }
3463 }
3464 else
3465 {
3466 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3467 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3468 {
3469 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3470 u8Vector, NewSS, uNewEsp, cbStackFrame));
3471 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3472 }
3473 }
3474
3475 /*
3476 * Start making changes.
3477 */
3478
3479 /* Create the stack frame. */
3480 RTPTRUNION uStackFrame;
3481 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3482 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3483 if (rcStrict != VINF_SUCCESS)
3484 return rcStrict;
3485 void * const pvStackFrame = uStackFrame.pv;
3486 if (f32BitGate)
3487 {
3488 if (fFlags & IEM_XCPT_FLAGS_ERR)
3489 *uStackFrame.pu32++ = uErr;
3490 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3491 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3492 uStackFrame.pu32[2] = fEfl;
3493 uStackFrame.pu32[3] = pCtx->esp;
3494 uStackFrame.pu32[4] = pCtx->ss.Sel;
3495 if (fEfl & X86_EFL_VM)
3496 {
3497 uStackFrame.pu32[1] = pCtx->cs.Sel;
3498 uStackFrame.pu32[5] = pCtx->es.Sel;
3499 uStackFrame.pu32[6] = pCtx->ds.Sel;
3500 uStackFrame.pu32[7] = pCtx->fs.Sel;
3501 uStackFrame.pu32[8] = pCtx->gs.Sel;
3502 }
3503 }
3504 else
3505 {
3506 if (fFlags & IEM_XCPT_FLAGS_ERR)
3507 *uStackFrame.pu16++ = uErr;
3508 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3509 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3510 uStackFrame.pu16[2] = fEfl;
3511 uStackFrame.pu16[3] = pCtx->sp;
3512 uStackFrame.pu16[4] = pCtx->ss.Sel;
3513 if (fEfl & X86_EFL_VM)
3514 {
3515 uStackFrame.pu16[1] = pCtx->cs.Sel;
3516 uStackFrame.pu16[5] = pCtx->es.Sel;
3517 uStackFrame.pu16[6] = pCtx->ds.Sel;
3518 uStackFrame.pu16[7] = pCtx->fs.Sel;
3519 uStackFrame.pu16[8] = pCtx->gs.Sel;
3520 }
3521 }
3522 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3523 if (rcStrict != VINF_SUCCESS)
3524 return rcStrict;
3525
3526 /* Mark the selectors 'accessed' (hope this is the correct time). */
3527 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3528 * after pushing the stack frame? (Write protect the gdt + stack to
3529 * find out.) */
3530 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3531 {
3532 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3533 if (rcStrict != VINF_SUCCESS)
3534 return rcStrict;
3535 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3536 }
3537
3538 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3539 {
3540 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3541 if (rcStrict != VINF_SUCCESS)
3542 return rcStrict;
3543 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3544 }
3545
3546 /*
3547 * Start committing the register changes (joins with the DPL=CPL branch).
3548 */
3549 pCtx->ss.Sel = NewSS;
3550 pCtx->ss.ValidSel = NewSS;
3551 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3552 pCtx->ss.u32Limit = cbLimitSS;
3553 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3554 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3555 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3556 pIemCpu->uCpl = uNewCpl;
3557
3558 if (fEfl & X86_EFL_VM)
3559 {
3560 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3561 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3562 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3563 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3564 }
3565 }
3566 /*
3567 * Same privilege, no stack change and a smaller stack frame.
3568 */
3569 else
3570 {
3571 uint64_t uNewRsp;
3572 RTPTRUNION uStackFrame;
3573 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3574 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3575 if (rcStrict != VINF_SUCCESS)
3576 return rcStrict;
3577 void * const pvStackFrame = uStackFrame.pv;
3578
3579 if (f32BitGate)
3580 {
3581 if (fFlags & IEM_XCPT_FLAGS_ERR)
3582 *uStackFrame.pu32++ = uErr;
3583 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3584 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3585 uStackFrame.pu32[2] = fEfl;
3586 }
3587 else
3588 {
3589 if (fFlags & IEM_XCPT_FLAGS_ERR)
3590 *uStackFrame.pu16++ = uErr;
3591 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3592 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3593 uStackFrame.pu16[2] = fEfl;
3594 }
3595 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3596 if (rcStrict != VINF_SUCCESS)
3597 return rcStrict;
3598
3599 /* Mark the CS selector as 'accessed'. */
3600 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3601 {
3602 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3603 if (rcStrict != VINF_SUCCESS)
3604 return rcStrict;
3605 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3606 }
3607
3608 /*
3609 * Start committing the register changes (joins with the other branch).
3610 */
3611 pCtx->rsp = uNewRsp;
3612 }
3613
3614 /* ... register committing continues. */
3615 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3616 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3617 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3618 pCtx->cs.u32Limit = cbLimitCS;
3619 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3620 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3621
3622 pCtx->rip = uNewEip;
3623 fEfl &= ~fEflToClear;
3624 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3625
3626 if (fFlags & IEM_XCPT_FLAGS_CR2)
3627 pCtx->cr2 = uCr2;
3628
3629 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3630 iemRaiseXcptAdjustState(pCtx, u8Vector);
3631
3632 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3633}
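/*
 * Note on the frame built above (illustrative recap): for a 32-bit gate with a
 * privilege change, the new stack receives, from the lowest address upwards
 * (i.e. what the handler finds at its ESP): an optional error code, then EIP
 * (advanced by cbInstr for software INTs), CS (RPL forced to the old CPL),
 * EFLAGS, the old ESP and the old SS, plus ES/DS/FS/GS when interrupting
 * V8086 code.  The same-privilege branch pushes only the error code, EIP, CS
 * and EFLAGS.
 */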
3634
3635
3636/**
3637 * Implements exceptions and interrupts for long mode.
3638 *
3639 * @returns VBox strict status code.
3640 * @param pIemCpu The IEM per CPU instance data.
3641 * @param pCtx The CPU context.
3642 * @param cbInstr The number of bytes to offset rIP by in the return
3643 * address.
3644 * @param u8Vector The interrupt / exception vector number.
3645 * @param fFlags The flags.
3646 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3647 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3648 */
3649IEM_STATIC VBOXSTRICTRC
3650iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3651 PCPUMCTX pCtx,
3652 uint8_t cbInstr,
3653 uint8_t u8Vector,
3654 uint32_t fFlags,
3655 uint16_t uErr,
3656 uint64_t uCr2)
3657{
3658 /*
3659 * Read the IDT entry.
3660 */
3661 uint16_t offIdt = (uint16_t)u8Vector << 4;
3662 if (pCtx->idtr.cbIdt < offIdt + 7)
3663 {
3664 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3665 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3666 }
3667 X86DESC64 Idte;
3668 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3669 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3670 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3671 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3672 return rcStrict;
3673 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3674 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3675 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3676
3677 /*
3678 * Check the descriptor type, DPL and such.
3679 * ASSUMES this is done in the same order as described for call-gate calls.
3680 */
3681 if (Idte.Gate.u1DescType)
3682 {
3683 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3684 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3685 }
3686 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3687 switch (Idte.Gate.u4Type)
3688 {
3689 case AMD64_SEL_TYPE_SYS_INT_GATE:
3690 fEflToClear |= X86_EFL_IF;
3691 break;
3692 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3693 break;
3694
3695 default:
3696 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3697 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3698 }
3699
3700 /* Check DPL against CPL if applicable. */
3701 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3702 {
3703 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3704 {
3705 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3706 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3707 }
3708 }
3709
3710 /* Is it there? */
3711 if (!Idte.Gate.u1Present)
3712 {
3713 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3714 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3715 }
3716
3717 /* A null CS is bad. */
3718 RTSEL NewCS = Idte.Gate.u16Sel;
3719 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3720 {
3721 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3722 return iemRaiseGeneralProtectionFault0(pIemCpu);
3723 }
3724
3725 /* Fetch the descriptor for the new CS. */
3726 IEMSELDESC DescCS;
3727 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3728 if (rcStrict != VINF_SUCCESS)
3729 {
3730 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3731 return rcStrict;
3732 }
3733
3734 /* Must be a 64-bit code segment. */
3735 if (!DescCS.Long.Gen.u1DescType)
3736 {
3737 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3738 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3739 }
3740 if ( !DescCS.Long.Gen.u1Long
3741 || DescCS.Long.Gen.u1DefBig
3742 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3743 {
3744 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3745 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3746 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3747 }
3748
3749 /* Don't allow lowering the privilege level. For non-conforming CS
3750 selectors, the CS.DPL sets the privilege level the trap/interrupt
3751 handler runs at. For conforming CS selectors, the CPL remains
3752 unchanged, but the CS.DPL must be <= CPL. */
3753 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3754 * when CPU in Ring-0. Result \#GP? */
3755 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3756 {
3757 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3758 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3759 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3760 }
3761
3762
3763 /* Make sure the selector is present. */
3764 if (!DescCS.Legacy.Gen.u1Present)
3765 {
3766 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3767 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3768 }
3769
3770 /* Check that the new RIP is canonical. */
3771 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3772 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3773 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3774 if (!IEM_IS_CANONICAL(uNewRip))
3775 {
3776 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3777 return iemRaiseGeneralProtectionFault0(pIemCpu);
3778 }
3779
3780 /*
3781 * If the privilege level changes or if the IST isn't zero, we need to get
3782 * a new stack from the TSS.
3783 */
3784 uint64_t uNewRsp;
3785 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3786 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3787 if ( uNewCpl != pIemCpu->uCpl
3788 || Idte.Gate.u3IST != 0)
3789 {
3790 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3791 if (rcStrict != VINF_SUCCESS)
3792 return rcStrict;
3793 }
3794 else
3795 uNewRsp = pCtx->rsp;
3796 uNewRsp &= ~(uint64_t)0xf;
3797
3798 /*
3799 * Calc the flag image to push.
3800 */
3801 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3802 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3803 fEfl &= ~X86_EFL_RF;
3804 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3805 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3806
3807 /*
3808 * Start making changes.
3809 */
3810
3811 /* Create the stack frame. */
3812 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3813 RTPTRUNION uStackFrame;
3814 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3815 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3816 if (rcStrict != VINF_SUCCESS)
3817 return rcStrict;
3818 void * const pvStackFrame = uStackFrame.pv;
3819
3820 if (fFlags & IEM_XCPT_FLAGS_ERR)
3821 *uStackFrame.pu64++ = uErr;
3822 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3823 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3824 uStackFrame.pu64[2] = fEfl;
3825 uStackFrame.pu64[3] = pCtx->rsp;
3826 uStackFrame.pu64[4] = pCtx->ss.Sel;
3827 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3828 if (rcStrict != VINF_SUCCESS)
3829 return rcStrict;
3830
3831 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3832 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3833 * after pushing the stack frame? (Write protect the gdt + stack to
3834 * find out.) */
3835 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3836 {
3837 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3838 if (rcStrict != VINF_SUCCESS)
3839 return rcStrict;
3840 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3841 }
3842
3843 /*
3844 * Start committing the register changes.
3845 */
3846 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3847 * hidden registers when interrupting 32-bit or 16-bit code! */
3848 if (uNewCpl != pIemCpu->uCpl)
3849 {
3850 pCtx->ss.Sel = 0 | uNewCpl;
3851 pCtx->ss.ValidSel = 0 | uNewCpl;
3852 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3853 pCtx->ss.u32Limit = UINT32_MAX;
3854 pCtx->ss.u64Base = 0;
3855 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3856 }
3857 pCtx->rsp = uNewRsp - cbStackFrame;
3858 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3859 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3860 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3861 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3862 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3863 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3864 pCtx->rip = uNewRip;
3865 pIemCpu->uCpl = uNewCpl;
3866
3867 fEfl &= ~fEflToClear;
3868 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3869
3870 if (fFlags & IEM_XCPT_FLAGS_CR2)
3871 pCtx->cr2 = uCr2;
3872
3873 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3874 iemRaiseXcptAdjustState(pCtx, u8Vector);
3875
3876 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3877}
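/*
 * Note (illustrative recap): in long mode SS:RSP is always part of the frame,
 * so the code above writes 5 quad words (6 with an error code) - RIP, CS,
 * RFLAGS, RSP and SS - with the error code ending up lowest.  The stack
 * pointer taken from the TSS/IST or the current RSP is first aligned down to
 * a 16-byte boundary and the frame is placed below it, and a non-zero IST
 * index in the gate forces a stack switch even without a privilege change.
 */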
3878
3879
3880/**
3881 * Implements exceptions and interrupts.
3882 *
3883 * All exceptions and interrupts go thru this function!
3884 *
3885 * @returns VBox strict status code.
3886 * @param pIemCpu The IEM per CPU instance data.
3887 * @param cbInstr The number of bytes to offset rIP by in the return
3888 * address.
3889 * @param u8Vector The interrupt / exception vector number.
3890 * @param fFlags The flags.
3891 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3892 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3893 */
3894DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3895iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3896 uint8_t cbInstr,
3897 uint8_t u8Vector,
3898 uint32_t fFlags,
3899 uint16_t uErr,
3900 uint64_t uCr2)
3901{
3902 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3903#ifdef IN_RING0
3904 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3905 AssertRCReturn(rc, rc);
3906#endif
3907
3908 /*
3909 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3910 */
3911 if ( pCtx->eflags.Bits.u1VM
3912 && pCtx->eflags.Bits.u2IOPL != 3
3913 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3914 && (pCtx->cr0 & X86_CR0_PE) )
3915 {
3916 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3917 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3918 u8Vector = X86_XCPT_GP;
3919 uErr = 0;
3920 }
3921#ifdef DBGFTRACE_ENABLED
3922 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3923 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3924 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3925#endif
3926
3927 /*
3928 * Do recursion accounting.
3929 */
3930 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3931 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3932 if (pIemCpu->cXcptRecursions == 0)
3933 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3934 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3935 else
3936 {
3937 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3938 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3939
3940 /** @todo double and triple faults. */
3941 if (pIemCpu->cXcptRecursions >= 3)
3942 {
3943#ifdef DEBUG_bird
3944 AssertFailed();
3945#endif
3946 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3947 }
3948
3949 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3950 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3951 {
3952 ....
3953 } */
3954 }
3955 pIemCpu->cXcptRecursions++;
3956 pIemCpu->uCurXcpt = u8Vector;
3957 pIemCpu->fCurXcpt = fFlags;
3958
3959 /*
3960 * Extensive logging.
3961 */
3962#if defined(LOG_ENABLED) && defined(IN_RING3)
3963 if (LogIs3Enabled())
3964 {
3965 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3966 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3967 char szRegs[4096];
3968 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3969 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3970 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3971 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3972 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3973 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3974 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3975 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3976 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3977 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3978 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3979 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3980 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3981 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3982 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3983 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3984 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3985 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3986 " efer=%016VR{efer}\n"
3987 " pat=%016VR{pat}\n"
3988 " sf_mask=%016VR{sf_mask}\n"
3989 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3990 " lstar=%016VR{lstar}\n"
3991 " star=%016VR{star} cstar=%016VR{cstar}\n"
3992 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3993 );
3994
3995 char szInstr[256];
3996 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3997 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3998 szInstr, sizeof(szInstr), NULL);
3999 Log3(("%s%s\n", szRegs, szInstr));
4000 }
4001#endif /* LOG_ENABLED */
4002
4003 /*
4004 * Call the mode specific worker function.
4005 */
4006 VBOXSTRICTRC rcStrict;
4007 if (!(pCtx->cr0 & X86_CR0_PE))
4008 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4009 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4010 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4011 else
4012 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4013
4014 /*
4015 * Unwind.
4016 */
4017 pIemCpu->cXcptRecursions--;
4018 pIemCpu->uCurXcpt = uPrevXcpt;
4019 pIemCpu->fCurXcpt = fPrevXcpt;
4020 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4021 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4022 return rcStrict;
4023}
4024
4025
4026/** \#DE - 00. */
4027DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4028{
4029 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4030}
4031
4032
4033/** \#DB - 01.
4034 * @note This automatically clears DR7.GD. */
4035DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4036{
4037 /** @todo set/clear RF. */
4038 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4039 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4040}
4041
4042
4043/** \#UD - 06. */
4044DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4045{
4046 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4047}
4048
4049
4050/** \#NM - 07. */
4051DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4052{
4053 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4054}
4055
4056
4057/** \#TS(err) - 0a. */
4058DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4059{
4060 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4061}
4062
4063
4064/** \#TS(tr) - 0a. */
4065DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4066{
4067 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4068 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4069}
4070
4071
4072/** \#TS(0) - 0a. */
4073DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4074{
4075 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4076 0, 0);
4077}
4078
4079
4080/** \#TS(err) - 0a. */
4081DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4082{
4083 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4084 uSel & X86_SEL_MASK_OFF_RPL, 0);
4085}
4086
4087
4088/** \#NP(err) - 0b. */
4089DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4090{
4091 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4092}
4093
4094
4095/** \#NP(seg) - 0b. */
4096DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4097{
4098 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4099 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4100}
4101
4102
4103/** \#NP(sel) - 0b. */
4104DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4105{
4106 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4107 uSel & ~X86_SEL_RPL, 0);
4108}
4109
4110
4111/** \#SS(seg) - 0c. */
4112DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4113{
4114 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4115 uSel & ~X86_SEL_RPL, 0);
4116}
4117
4118
4119/** \#SS(err) - 0c. */
4120DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4121{
4122 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4123}
4124
4125
4126/** \#GP(n) - 0d. */
4127DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4128{
4129 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4130}
4131
4132
4133/** \#GP(0) - 0d. */
4134DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4135{
4136 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4137}
4138
4139
4140/** \#GP(sel) - 0d. */
4141DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4142{
4143 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4144 Sel & ~X86_SEL_RPL, 0);
4145}
4146
4147
4148/** \#GP(0) - 0d. */
4149DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4150{
4151 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4152}
4153
4154
4155/** \#GP(sel) - 0d. */
4156DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4157{
4158 NOREF(iSegReg); NOREF(fAccess);
4159 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4160 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4161}
4162
4163
4164/** \#GP(sel) - 0d. */
4165DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4166{
4167 NOREF(Sel);
4168 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4169}
4170
4171
4172/** \#GP(sel) - 0d. */
4173DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4174{
4175 NOREF(iSegReg); NOREF(fAccess);
4176 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4177}
4178
4179
4180/** \#PF(n) - 0e. */
4181DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4182{
4183 uint16_t uErr;
4184 switch (rc)
4185 {
4186 case VERR_PAGE_NOT_PRESENT:
4187 case VERR_PAGE_TABLE_NOT_PRESENT:
4188 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4189 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4190 uErr = 0;
4191 break;
4192
4193 default:
4194 AssertMsgFailed(("%Rrc\n", rc));
4195 case VERR_ACCESS_DENIED:
4196 uErr = X86_TRAP_PF_P;
4197 break;
4198
4199 /** @todo reserved */
4200 }
4201
4202 if (pIemCpu->uCpl == 3)
4203 uErr |= X86_TRAP_PF_US;
4204
4205 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4206 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4207 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4208 uErr |= X86_TRAP_PF_ID;
4209
4210#if 0 /* This is so much nonsense, really. Why was it done like that? */
4211 /* Note! RW access callers reporting a WRITE protection fault, will clear
4212 the READ flag before calling. So, read-modify-write accesses (RW)
4213 can safely be reported as READ faults. */
4214 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4215 uErr |= X86_TRAP_PF_RW;
4216#else
4217 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4218 {
4219 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4220 uErr |= X86_TRAP_PF_RW;
4221 }
4222#endif
4223
4224 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4225 uErr, GCPtrWhere);
4226}
4227
4228
4229/** \#MF(0) - 10. */
4230DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4231{
4232 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4233}
4234
4235
4236/** \#AC(0) - 11. */
4237DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4238{
4239 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4240}
4241
4242
4243/**
4244 * Macro for calling iemCImplRaiseDivideError().
4245 *
4246 * This enables us to add/remove arguments and force different levels of
4247 * inlining as we wish.
4248 *
4249 * @return Strict VBox status code.
4250 */
4251#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4252IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4253{
4254 NOREF(cbInstr);
4255 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4256}
4257
4258
4259/**
4260 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4261 *
4262 * This enables us to add/remove arguments and force different levels of
4263 * inlining as we wish.
4264 *
4265 * @return Strict VBox status code.
4266 */
4267#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4268IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4269{
4270 NOREF(cbInstr);
4271 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4272}
4273
4274
4275/**
4276 * Macro for calling iemCImplRaiseInvalidOpcode().
4277 *
4278 * This enables us to add/remove arguments and force different levels of
4279 * inlining as we wish.
4280 *
4281 * @return Strict VBox status code.
4282 */
4283#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4284IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4285{
4286 NOREF(cbInstr);
4287 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4288}
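/*
 * Usage sketch (illustrative; the decoder name is hypothetical): opcode
 * decoders reject undecodable or unsupported forms through the macro rather
 * than by calling the C implementation directly, e.g.
 *
 *      FNIEMOP_DEF(iemOp_SomeReservedEncoding)
 *      {
 *          return IEMOP_RAISE_INVALID_OPCODE();
 *      }
 *
 * which is also what the FNIEMOP_UD_STUB macros further down expand to.
 */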
4289
4290
4291/** @} */
4292
4293
4294/*
4295 *
4296 * Helper routines.
4297 * Helper routines.
4298 * Helper routines.
4299 *
4300 */
4301
4302/**
4303 * Recalculates the effective operand size.
4304 *
4305 * @param pIemCpu The IEM state.
4306 */
4307IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4308{
4309 switch (pIemCpu->enmCpuMode)
4310 {
4311 case IEMMODE_16BIT:
4312 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4313 break;
4314 case IEMMODE_32BIT:
4315 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4316 break;
4317 case IEMMODE_64BIT:
4318 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4319 {
4320 case 0:
4321 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4322 break;
4323 case IEM_OP_PRF_SIZE_OP:
4324 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4325 break;
4326 case IEM_OP_PRF_SIZE_REX_W:
4327 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4328 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4329 break;
4330 }
4331 break;
4332 default:
4333 AssertFailed();
4334 }
4335}
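/*
 * Worked example: in 64-bit mode with both a 66h operand-size prefix and
 * REX.W, the switch above takes the (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)
 * case, so REX.W wins and the effective operand size is 64-bit; 66h alone
 * yields 16-bit, and no prefix falls back to the default operand size.
 */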
4336
4337
4338/**
4339 * Sets the default operand size to 64-bit and recalculates the effective
4340 * operand size.
4341 *
4342 * @param pIemCpu The IEM state.
4343 */
4344IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4345{
4346 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4347 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4348 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4349 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4350 else
4351 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4352}
4353
4354
4355/*
4356 *
4357 * Common opcode decoders.
4358 * Common opcode decoders.
4359 * Common opcode decoders.
4360 *
4361 */
4362//#include <iprt/mem.h>
4363
4364/**
4365 * Used to add extra details about a stub case.
4366 * @param pIemCpu The IEM per CPU state.
4367 */
4368IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4369{
4370#if defined(LOG_ENABLED) && defined(IN_RING3)
4371 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4372 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4373 char szRegs[4096];
4374 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4375 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4376 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4377 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4378 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4379 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4380 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4381 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4382 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4383 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4384 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4385 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4386 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4387 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4388 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4389 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4390 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4391 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4392 " efer=%016VR{efer}\n"
4393 " pat=%016VR{pat}\n"
4394 " sf_mask=%016VR{sf_mask}\n"
4395 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4396 " lstar=%016VR{lstar}\n"
4397 " star=%016VR{star} cstar=%016VR{cstar}\n"
4398 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4399 );
4400
4401 char szInstr[256];
4402 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4403 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4404 szInstr, sizeof(szInstr), NULL);
4405
4406 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4407#else
4408 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4409#endif
4410}
4411
4412/**
4413 * Complains about a stub.
4414 *
4415 * Two versions of this macro are provided: one for daily use and one for use
4416 * when working on IEM.
4417 */
4418#if 0
4419# define IEMOP_BITCH_ABOUT_STUB() \
4420 do { \
4421 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4422 iemOpStubMsg2(pIemCpu); \
4423 RTAssertPanic(); \
4424 } while (0)
4425#else
4426# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4427#endif
4428
4429/** Stubs an opcode. */
4430#define FNIEMOP_STUB(a_Name) \
4431 FNIEMOP_DEF(a_Name) \
4432 { \
4433 IEMOP_BITCH_ABOUT_STUB(); \
4434 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4435 } \
4436 typedef int ignore_semicolon
4437
4438/** Stubs an opcode. */
4439#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4440 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4441 { \
4442 IEMOP_BITCH_ABOUT_STUB(); \
4443 NOREF(a_Name0); \
4444 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4445 } \
4446 typedef int ignore_semicolon
4447
4448/** Stubs an opcode which currently should raise \#UD. */
4449#define FNIEMOP_UD_STUB(a_Name) \
4450 FNIEMOP_DEF(a_Name) \
4451 { \
4452 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4453 return IEMOP_RAISE_INVALID_OPCODE(); \
4454 } \
4455 typedef int ignore_semicolon
4456
4457/** Stubs an opcode which currently should raise \#UD. */
4458#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4459 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4460 { \
4461 NOREF(a_Name0); \
4462 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4463 return IEMOP_RAISE_INVALID_OPCODE(); \
4464 } \
4465 typedef int ignore_semicolon
4466
4467
4468
4469/** @name Register Access.
4470 * @{
4471 */
4472
4473/**
4474 * Gets a reference (pointer) to the specified hidden segment register.
4475 *
4476 * @returns Hidden register reference.
4477 * @param pIemCpu The per CPU data.
4478 * @param iSegReg The segment register.
4479 */
4480IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4481{
4482 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4483 PCPUMSELREG pSReg;
4484 switch (iSegReg)
4485 {
4486 case X86_SREG_ES: pSReg = &pCtx->es; break;
4487 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4488 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4489 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4490 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4491 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4492 default:
4493 AssertFailedReturn(NULL);
4494 }
4495#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4496 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4497 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4498#else
4499 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4500#endif
4501 return pSReg;
4502}
4503
4504
4505/**
4506 * Gets a reference (pointer) to the specified segment register (the selector
4507 * value).
4508 *
4509 * @returns Pointer to the selector variable.
4510 * @param pIemCpu The per CPU data.
4511 * @param iSegReg The segment register.
4512 */
4513IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4514{
4515 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4516 switch (iSegReg)
4517 {
4518 case X86_SREG_ES: return &pCtx->es.Sel;
4519 case X86_SREG_CS: return &pCtx->cs.Sel;
4520 case X86_SREG_SS: return &pCtx->ss.Sel;
4521 case X86_SREG_DS: return &pCtx->ds.Sel;
4522 case X86_SREG_FS: return &pCtx->fs.Sel;
4523 case X86_SREG_GS: return &pCtx->gs.Sel;
4524 }
4525 AssertFailedReturn(NULL);
4526}
4527
4528
4529/**
4530 * Fetches the selector value of a segment register.
4531 *
4532 * @returns The selector value.
4533 * @param pIemCpu The per CPU data.
4534 * @param iSegReg The segment register.
4535 */
4536IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4537{
4538 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4539 switch (iSegReg)
4540 {
4541 case X86_SREG_ES: return pCtx->es.Sel;
4542 case X86_SREG_CS: return pCtx->cs.Sel;
4543 case X86_SREG_SS: return pCtx->ss.Sel;
4544 case X86_SREG_DS: return pCtx->ds.Sel;
4545 case X86_SREG_FS: return pCtx->fs.Sel;
4546 case X86_SREG_GS: return pCtx->gs.Sel;
4547 }
4548 AssertFailedReturn(0xffff);
4549}
4550
4551
4552/**
4553 * Gets a reference (pointer) to the specified general register.
4554 *
4555 * @returns Register reference.
4556 * @param pIemCpu The per CPU data.
4557 * @param iReg The general register.
4558 */
4559IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4560{
4561 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4562 switch (iReg)
4563 {
4564 case X86_GREG_xAX: return &pCtx->rax;
4565 case X86_GREG_xCX: return &pCtx->rcx;
4566 case X86_GREG_xDX: return &pCtx->rdx;
4567 case X86_GREG_xBX: return &pCtx->rbx;
4568 case X86_GREG_xSP: return &pCtx->rsp;
4569 case X86_GREG_xBP: return &pCtx->rbp;
4570 case X86_GREG_xSI: return &pCtx->rsi;
4571 case X86_GREG_xDI: return &pCtx->rdi;
4572 case X86_GREG_x8: return &pCtx->r8;
4573 case X86_GREG_x9: return &pCtx->r9;
4574 case X86_GREG_x10: return &pCtx->r10;
4575 case X86_GREG_x11: return &pCtx->r11;
4576 case X86_GREG_x12: return &pCtx->r12;
4577 case X86_GREG_x13: return &pCtx->r13;
4578 case X86_GREG_x14: return &pCtx->r14;
4579 case X86_GREG_x15: return &pCtx->r15;
4580 }
4581 AssertFailedReturn(NULL);
4582}
4583
4584
4585/**
4586 * Gets a reference (pointer) to the specified 8-bit general register.
4587 *
4588 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4589 *
4590 * @returns Register reference.
4591 * @param pIemCpu The per CPU data.
4592 * @param iReg The register.
4593 */
4594IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4595{
4596 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4597 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4598
4599 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4600 if (iReg >= 4)
4601 pu8Reg++;
4602 return pu8Reg;
4603}
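/*
 * Worked example: without a REX prefix, register numbers 4 thru 7 encode
 * AH/CH/DH/BH.  For iReg=4 (AH) the code above resolves iemGRegRef(pIemCpu, 0),
 * i.e. &pCtx->rax, and the +1 byte adjustment points at bits 8 thru 15 of RAX;
 * this relies on the little-endian layout of the guest context on x86/AMD64
 * hosts.
 */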
4604
4605
4606/**
4607 * Fetches the value of an 8-bit general register.
4608 *
4609 * @returns The register value.
4610 * @param pIemCpu The per CPU data.
4611 * @param iReg The register.
4612 */
4613IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4614{
4615 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4616 return *pbSrc;
4617}
4618
4619
4620/**
4621 * Fetches the value of a 16-bit general register.
4622 *
4623 * @returns The register value.
4624 * @param pIemCpu The per CPU data.
4625 * @param iReg The register.
4626 */
4627IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4628{
4629 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4630}
4631
4632
4633/**
4634 * Fetches the value of a 32-bit general register.
4635 *
4636 * @returns The register value.
4637 * @param pIemCpu The per CPU data.
4638 * @param iReg The register.
4639 */
4640IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4641{
4642 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4643}
4644
4645
4646/**
4647 * Fetches the value of a 64-bit general register.
4648 *
4649 * @returns The register value.
4650 * @param pIemCpu The per CPU data.
4651 * @param iReg The register.
4652 */
4653IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4654{
4655 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4656}
4657
4658
4659/**
4660 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4661 *
4662 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4663 * segment limit.
4664 *
4665 * @param pIemCpu The per CPU data.
4666 * @param offNextInstr The offset of the next instruction.
4667 */
4668IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4669{
4670 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4671 switch (pIemCpu->enmEffOpSize)
4672 {
4673 case IEMMODE_16BIT:
4674 {
4675 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4676 if ( uNewIp > pCtx->cs.u32Limit
4677 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4678 return iemRaiseGeneralProtectionFault0(pIemCpu);
4679 pCtx->rip = uNewIp;
4680 break;
4681 }
4682
4683 case IEMMODE_32BIT:
4684 {
4685 Assert(pCtx->rip <= UINT32_MAX);
4686 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4687
4688 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4689 if (uNewEip > pCtx->cs.u32Limit)
4690 return iemRaiseGeneralProtectionFault0(pIemCpu);
4691 pCtx->rip = uNewEip;
4692 break;
4693 }
4694
4695 case IEMMODE_64BIT:
4696 {
4697 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4698
4699 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4700 if (!IEM_IS_CANONICAL(uNewRip))
4701 return iemRaiseGeneralProtectionFault0(pIemCpu);
4702 pCtx->rip = uNewRip;
4703 break;
4704 }
4705
4706 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4707 }
4708
4709 pCtx->eflags.Bits.u1RF = 0;
4710 return VINF_SUCCESS;
4711}
4712
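/*
 * Editor's illustrative sketch (not part of the original source): the target
 * is computed relative to the end of the instruction, which is why offOpcode
 * (the number of opcode bytes fetched) is added to the displacement.  The
 * numbers below are made-up example values.
 */
#if 0 /* example only */
/* A short JMP (EB F0) at IP=0x1000 in 16-bit code: 2 opcode bytes, disp8 = -16. */
uint16_t const uIp       = UINT16_C(0x1000);
uint8_t  const cbOpcodes = 2;                       /* plays the role of pIemCpu->offOpcode */
int8_t   const offDisp   = (int8_t)0xf0;            /* -16 */
uint16_t const uNewIp    = uIp + offDisp + cbOpcodes;   /* = 0x0ff2 */
#endif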
4713
4714/**
4715 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4716 *
4717 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4718 * segment limit.
4719 *
4720 * @returns Strict VBox status code.
4721 * @param pIemCpu The per CPU data.
4722 * @param offNextInstr The offset of the next instruction.
4723 */
4724IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4725{
4726 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4727 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4728
4729 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4730 if ( uNewIp > pCtx->cs.u32Limit
4731 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4732 return iemRaiseGeneralProtectionFault0(pIemCpu);
4733 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4734 pCtx->rip = uNewIp;
4735 pCtx->eflags.Bits.u1RF = 0;
4736
4737 return VINF_SUCCESS;
4738}
4739
4740
4741/**
4742 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4743 *
4744 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4745 * segment limit.
4746 *
4747 * @returns Strict VBox status code.
4748 * @param pIemCpu The per CPU data.
4749 * @param offNextInstr The offset of the next instruction.
4750 */
4751IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4752{
4753 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4754 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4755
4756 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4757 {
4758 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4759
4760 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4761 if (uNewEip > pCtx->cs.u32Limit)
4762 return iemRaiseGeneralProtectionFault0(pIemCpu);
4763 pCtx->rip = uNewEip;
4764 }
4765 else
4766 {
4767 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4768
4769 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4770 if (!IEM_IS_CANONICAL(uNewRip))
4771 return iemRaiseGeneralProtectionFault0(pIemCpu);
4772 pCtx->rip = uNewRip;
4773 }
4774 pCtx->eflags.Bits.u1RF = 0;
4775 return VINF_SUCCESS;
4776}
4777
4778
4779/**
4780 * Performs a near jump to the specified address.
4781 *
4782 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4783 * segment limit.
4784 *
4785 * @param pIemCpu The per CPU data.
4786 * @param uNewRip The new RIP value.
4787 */
4788IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4789{
4790 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4791 switch (pIemCpu->enmEffOpSize)
4792 {
4793 case IEMMODE_16BIT:
4794 {
4795 Assert(uNewRip <= UINT16_MAX);
4796 if ( uNewRip > pCtx->cs.u32Limit
4797 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4798 return iemRaiseGeneralProtectionFault0(pIemCpu);
4799 /** @todo Test 16-bit jump in 64-bit mode. */
4800 pCtx->rip = uNewRip;
4801 break;
4802 }
4803
4804 case IEMMODE_32BIT:
4805 {
4806 Assert(uNewRip <= UINT32_MAX);
4807 Assert(pCtx->rip <= UINT32_MAX);
4808 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4809
4810 if (uNewRip > pCtx->cs.u32Limit)
4811 return iemRaiseGeneralProtectionFault0(pIemCpu);
4812 pCtx->rip = uNewRip;
4813 break;
4814 }
4815
4816 case IEMMODE_64BIT:
4817 {
4818 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4819
4820 if (!IEM_IS_CANONICAL(uNewRip))
4821 return iemRaiseGeneralProtectionFault0(pIemCpu);
4822 pCtx->rip = uNewRip;
4823 break;
4824 }
4825
4826 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4827 }
4828
4829 pCtx->eflags.Bits.u1RF = 0;
4830 return VINF_SUCCESS;
4831}
4832
4833
4834/**
4835 * Get the address of the top of the stack.
4836 *
4837 * @param pIemCpu The per CPU data.
4838 * @param pCtx The CPU context which SP/ESP/RSP should be
4839 * read.
4840 */
4841DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4842{
4843 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4844 return pCtx->rsp;
4845 if (pCtx->ss.Attr.n.u1DefBig)
4846 return pCtx->esp;
4847 return pCtx->sp;
4848}
4849
4850
4851/**
4852 * Updates the RIP/EIP/IP to point to the next instruction.
4853 *
4854 * This function leaves the EFLAGS.RF flag alone.
4855 *
4856 * @param pIemCpu The per CPU data.
4857 * @param cbInstr The number of bytes to add.
4858 */
4859IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4860{
4861 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4862 switch (pIemCpu->enmCpuMode)
4863 {
4864 case IEMMODE_16BIT:
4865 Assert(pCtx->rip <= UINT16_MAX);
4866 pCtx->eip += cbInstr;
4867 pCtx->eip &= UINT32_C(0xffff);
4868 break;
4869
4870 case IEMMODE_32BIT:
4871 pCtx->eip += cbInstr;
4872 Assert(pCtx->rip <= UINT32_MAX);
4873 break;
4874
4875 case IEMMODE_64BIT:
4876 pCtx->rip += cbInstr;
4877 break;
4878 default: AssertFailed();
4879 }
4880}
4881
4882
4883#if 0
4884/**
4885 * Updates the RIP/EIP/IP to point to the next instruction.
4886 *
4887 * @param pIemCpu The per CPU data.
4888 */
4889IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4890{
4891 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4892}
4893#endif
4894
4895
4896
4897/**
4898 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4899 *
4900 * @param pIemCpu The per CPU data.
4901 * @param cbInstr The number of bytes to add.
4902 */
4903IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4904{
4905 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4906
4907 pCtx->eflags.Bits.u1RF = 0;
4908
4909 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4910 switch (pIemCpu->enmCpuMode)
4911 {
4912 /** @todo investigate if EIP or RIP is really incremented. */
4913 case IEMMODE_16BIT:
4914 case IEMMODE_32BIT:
4915 pCtx->eip += cbInstr;
4916 Assert(pCtx->rip <= UINT32_MAX);
4917 break;
4918
4919 case IEMMODE_64BIT:
4920 pCtx->rip += cbInstr;
4921 break;
4922 default: AssertFailed();
4923 }
4924}
4925
4926
4927/**
4928 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4929 *
4930 * @param pIemCpu The per CPU data.
4931 */
4932IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4933{
4934 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4935}
4936
4937
4938/**
4939 * Adds to the stack pointer.
4940 *
4941 * @param pIemCpu The per CPU data.
4942 * @param pCtx The CPU context which SP/ESP/RSP should be
4943 * updated.
4944 * @param cbToAdd The number of bytes to add.
4945 */
4946DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4947{
4948 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4949 pCtx->rsp += cbToAdd;
4950 else if (pCtx->ss.Attr.n.u1DefBig)
4951 pCtx->esp += cbToAdd;
4952 else
4953 pCtx->sp += cbToAdd;
4954}
4955
4956
4957/**
4958 * Subtracts from the stack pointer.
4959 *
4960 * @param pIemCpu The per CPU data.
4961 * @param pCtx The CPU context which SP/ESP/RSP should be
4962 * updated.
4963 * @param cbToSub The number of bytes to subtract.
4964 */
4965DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4966{
4967 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4968 pCtx->rsp -= cbToSub;
4969 else if (pCtx->ss.Attr.n.u1DefBig)
4970 pCtx->esp -= cbToSub;
4971 else
4972 pCtx->sp -= cbToSub;
4973}
4974
4975
4976/**
4977 * Adds to the temporary stack pointer.
4978 *
4979 * @param pIemCpu The per CPU data.
4980 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4981 * @param cbToAdd The number of bytes to add.
4982 * @param pCtx Where to get the current stack mode.
4983 */
4984DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4985{
4986 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4987 pTmpRsp->u += cbToAdd;
4988 else if (pCtx->ss.Attr.n.u1DefBig)
4989 pTmpRsp->DWords.dw0 += cbToAdd;
4990 else
4991 pTmpRsp->Words.w0 += cbToAdd;
4992}
4993
4994
4995/**
4996 * Subtracts from the temporary stack pointer.
4997 *
4998 * @param pIemCpu The per CPU data.
4999 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5000 * @param cbToSub The number of bytes to subtract.
5001 * @param pCtx Where to get the current stack mode.
5002 * @remarks The @a cbToSub argument *MUST* be 16-bit, as iemCImpl_enter
5003 *          expects that.
5004 */
5005DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5006{
5007 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5008 pTmpRsp->u -= cbToSub;
5009 else if (pCtx->ss.Attr.n.u1DefBig)
5010 pTmpRsp->DWords.dw0 -= cbToSub;
5011 else
5012 pTmpRsp->Words.w0 -= cbToSub;
5013}
5014
5015
5016/**
5017 * Calculates the effective stack address for a push of the specified size as
5018 * well as the new RSP value (upper bits may be masked).
5019 *
5020 * @returns Effective stack address for the push.
5021 * @param pIemCpu The IEM per CPU data.
5022 * @param pCtx Where to get the current stack mode.
5023 * @param   cbItem              The size of the stack item to push.
5024 * @param puNewRsp Where to return the new RSP value.
5025 */
5026DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5027{
5028 RTUINT64U uTmpRsp;
5029 RTGCPTR GCPtrTop;
5030 uTmpRsp.u = pCtx->rsp;
5031
5032 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5033 GCPtrTop = uTmpRsp.u -= cbItem;
5034 else if (pCtx->ss.Attr.n.u1DefBig)
5035 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5036 else
5037 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5038 *puNewRsp = uTmpRsp.u;
5039 return GCPtrTop;
5040}
5041
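/*
 * Editor's illustrative sketch (not part of the original source): with a
 * 32-bit stack (SS.B=1) and ESP=0x2000, a 4 byte push yields the write
 * address 0x1ffc and the same value in the low 32 bits of the new RSP.  The
 * fragment assumes pIemCpu and pCtx are in scope; the caller writes the item
 * first and only commits *puNewRsp if that write succeeds.
 */
#if 0 /* example only, fragment */
uint64_t uNewRsp;
RTGCPTR  GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4 /*cbItem*/, &uNewRsp);
/* ... write the 4 byte item at GCPtrTop, then pCtx->rsp = uNewRsp; ... */
#endif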
5042
5043/**
5044 * Gets the current stack pointer and calculates the value after a pop of the
5045 * specified size.
5046 *
5047 * @returns Current stack pointer.
5048 * @param pIemCpu The per CPU data.
5049 * @param pCtx Where to get the current stack mode.
5050 * @param cbItem The size of the stack item to pop.
5051 * @param puNewRsp Where to return the new RSP value.
5052 */
5053DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5054{
5055 RTUINT64U uTmpRsp;
5056 RTGCPTR GCPtrTop;
5057 uTmpRsp.u = pCtx->rsp;
5058
5059 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5060 {
5061 GCPtrTop = uTmpRsp.u;
5062 uTmpRsp.u += cbItem;
5063 }
5064 else if (pCtx->ss.Attr.n.u1DefBig)
5065 {
5066 GCPtrTop = uTmpRsp.DWords.dw0;
5067 uTmpRsp.DWords.dw0 += cbItem;
5068 }
5069 else
5070 {
5071 GCPtrTop = uTmpRsp.Words.w0;
5072 uTmpRsp.Words.w0 += cbItem;
5073 }
5074 *puNewRsp = uTmpRsp.u;
5075 return GCPtrTop;
5076}
5077
5078
5079/**
5080 * Calculates the effective stack address for a push of the specified size as
5081 * well as the new temporary RSP value (upper bits may be masked).
5082 *
5083 * @returns Effective stack address for the push.
5084 * @param   pIemCpu             The per CPU data.
5085 * @param   pCtx                Where to get the current stack mode.
5086 * @param   pTmpRsp             The temporary stack pointer.  This is updated.
5087 * @param   cbItem              The size of the stack item to push.
5088 */
5089DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5090{
5091 RTGCPTR GCPtrTop;
5092
5093 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5094 GCPtrTop = pTmpRsp->u -= cbItem;
5095 else if (pCtx->ss.Attr.n.u1DefBig)
5096 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5097 else
5098 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5099 return GCPtrTop;
5100}
5101
5102
5103/**
5104 * Gets the effective stack address for a pop of the specified size and
5105 * calculates and updates the temporary RSP.
5106 *
5107 * @returns Current stack pointer.
5108 * @param pIemCpu The per CPU data.
5109 * @param pTmpRsp The temporary stack pointer. This is updated.
5110 * @param pCtx Where to get the current stack mode.
5111 * @param cbItem The size of the stack item to pop.
5112 */
5113DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5114{
5115 RTGCPTR GCPtrTop;
5116 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5117 {
5118 GCPtrTop = pTmpRsp->u;
5119 pTmpRsp->u += cbItem;
5120 }
5121 else if (pCtx->ss.Attr.n.u1DefBig)
5122 {
5123 GCPtrTop = pTmpRsp->DWords.dw0;
5124 pTmpRsp->DWords.dw0 += cbItem;
5125 }
5126 else
5127 {
5128 GCPtrTop = pTmpRsp->Words.w0;
5129 pTmpRsp->Words.w0 += cbItem;
5130 }
5131 return GCPtrTop;
5132}
5133
5134/** @} */
5135
5136
5137/** @name FPU access and helpers.
5138 *
5139 * @{
5140 */
5141
5142
5143/**
5144 * Hook for preparing to use the host FPU.
5145 *
5146 * This is necessary in ring-0 and raw-mode context.
5147 *
5148 * @param pIemCpu The IEM per CPU data.
5149 */
5150DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5151{
5152#ifdef IN_RING3
5153 NOREF(pIemCpu);
5154#else
5155/** @todo RZ: FIXME */
5156//# error "Implement me"
5157#endif
5158}
5159
5160
5161/**
5162 * Hook for preparing to use the host FPU for SSE.
5163 *
5164 * This is necessary in ring-0 and raw-mode context.
5165 *
5166 * @param pIemCpu The IEM per CPU data.
5167 */
5168DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5169{
5170 iemFpuPrepareUsage(pIemCpu);
5171}
5172
5173
5174/**
5175 * Stores a QNaN value into a FPU register.
5176 *
5177 * @param pReg Pointer to the register.
5178 */
5179DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5180{
5181 pReg->au32[0] = UINT32_C(0x00000000);
5182 pReg->au32[1] = UINT32_C(0xc0000000);
5183 pReg->au16[4] = UINT16_C(0xffff);
5184}
5185
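/*
 * Editor's illustrative sketch (not part of the original source): the bytes
 * stored above form the x87 "real indefinite" QNaN - sign=1, exponent=0x7fff,
 * mantissa with only the integer and quiet bits set.
 */
#if 0 /* example only */
RTFLOAT80U r80Tmp;
iemFpuStoreQNan(&r80Tmp);
Assert(r80Tmp.s.uExponent   == 0x7fff);
Assert(r80Tmp.s.u64Mantissa == UINT64_C(0xc000000000000000));
#endif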
5186
5187/**
5188 * Updates the FOP, FPU.CS and FPUIP registers.
5189 *
5190 * @param pIemCpu The IEM per CPU data.
5191 * @param pCtx The CPU context.
5192 * @param pFpuCtx The FPU context.
5193 */
5194DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5195{
5196 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5197 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5198    /** @todo x87.CS and FPUIP need to be kept separately. */
5199 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5200 {
5201 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5202 * happens in real mode here based on the fnsave and fnstenv images. */
5203 pFpuCtx->CS = 0;
5204 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5205 }
5206 else
5207 {
5208 pFpuCtx->CS = pCtx->cs.Sel;
5209 pFpuCtx->FPUIP = pCtx->rip;
5210 }
5211}
5212
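/*
 * Editor's illustrative sketch (not part of the original source): in real
 * mode the FPUIP image above packs CS and IP together, e.g. CS=0x1234 and
 * IP=0x0010 gives 0x0010 | (0x1234 << 4) = 0x12350.  Made-up example values.
 */
#if 0 /* example only */
uint32_t const uFpuIp = UINT32_C(0x0010) | (UINT32_C(0x1234) << 4);   /* = 0x12350 */
#endif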
5213
5214/**
5215 * Updates the x87.DS and FPUDP registers.
5216 *
5217 * @param pIemCpu The IEM per CPU data.
5218 * @param pCtx The CPU context.
5219 * @param pFpuCtx The FPU context.
5220 * @param iEffSeg The effective segment register.
5221 * @param GCPtrEff The effective address relative to @a iEffSeg.
5222 */
5223DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5224{
5225 RTSEL sel;
5226 switch (iEffSeg)
5227 {
5228 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5229 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5230 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5231 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5232 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5233 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5234 default:
5235 AssertMsgFailed(("%d\n", iEffSeg));
5236 sel = pCtx->ds.Sel;
5237 }
5238    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5239 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5240 {
5241 pFpuCtx->DS = 0;
5242 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5243 }
5244 else
5245 {
5246 pFpuCtx->DS = sel;
5247 pFpuCtx->FPUDP = GCPtrEff;
5248 }
5249}
5250
5251
5252/**
5253 * Rotates the stack registers in the push direction.
5254 *
5255 * @param pFpuCtx The FPU context.
5256 * @remarks This is a complete waste of time, but fxsave stores the registers in
5257 * stack order.
5258 */
5259DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5260{
5261 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5262 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5263 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5264 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5265 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5266 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5267 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5268 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5269 pFpuCtx->aRegs[0].r80 = r80Tmp;
5270}
5271
5272
5273/**
5274 * Rotates the stack registers in the pop direction.
5275 *
5276 * @param pFpuCtx The FPU context.
5277 * @remarks This is a complete waste of time, but fxsave stores the registers in
5278 * stack order.
5279 */
5280DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5281{
5282 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5283 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5284 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5285 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5286 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5287 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5288 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5289 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5290 pFpuCtx->aRegs[7].r80 = r80Tmp;
5291}
5292
5293
5294/**
5295 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5296 * exception prevents it.
5297 *
5298 * @param pIemCpu The IEM per CPU data.
5299 * @param pResult The FPU operation result to push.
5300 * @param pFpuCtx The FPU context.
5301 */
5302IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5303{
5304 /* Update FSW and bail if there are pending exceptions afterwards. */
5305 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5306 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5307 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5308 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5309 {
5310 pFpuCtx->FSW = fFsw;
5311 return;
5312 }
5313
5314 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5315 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5316 {
5317 /* All is fine, push the actual value. */
5318 pFpuCtx->FTW |= RT_BIT(iNewTop);
5319 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5320 }
5321 else if (pFpuCtx->FCW & X86_FCW_IM)
5322 {
5323 /* Masked stack overflow, push QNaN. */
5324 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5325 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5326 }
5327 else
5328 {
5329 /* Raise stack overflow, don't push anything. */
5330 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5331 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5332 return;
5333 }
5334
5335 fFsw &= ~X86_FSW_TOP_MASK;
5336 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5337 pFpuCtx->FSW = fFsw;
5338
5339 iemFpuRotateStackPush(pFpuCtx);
5340}
5341
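/*
 * Editor's illustrative sketch (not part of the original source): a push
 * decrements TOP modulo 8, which the code above expresses as adding 7 and
 * masking; e.g. TOP=0 wraps around to 7.
 */
#if 0 /* example only */
uint16_t const iTop    = 0;
uint16_t const iNewTop = (iTop + 7) & X86_FSW_TOP_SMASK;   /* = 7 */
#endif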
5342
5343/**
5344 * Stores a result in a FPU register and updates the FSW and FTW.
5345 *
5346 * @param pFpuCtx The FPU context.
5347 * @param pResult The result to store.
5348 * @param iStReg Which FPU register to store it in.
5349 */
5350IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5351{
5352 Assert(iStReg < 8);
5353 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5354 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5355 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5356 pFpuCtx->FTW |= RT_BIT(iReg);
5357 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5358}
5359
5360
5361/**
5362 * Only updates the FPU status word (FSW) with the result of the current
5363 * instruction.
5364 *
5365 * @param pFpuCtx The FPU context.
5366 * @param u16FSW The FSW output of the current instruction.
5367 */
5368IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5369{
5370 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5371 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5372}
5373
5374
5375/**
5376 * Pops one item off the FPU stack if no pending exception prevents it.
5377 *
5378 * @param pFpuCtx The FPU context.
5379 */
5380IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5381{
5382 /* Check pending exceptions. */
5383 uint16_t uFSW = pFpuCtx->FSW;
5384 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5385 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5386 return;
5387
5388 /* TOP--. */
5389 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5390 uFSW &= ~X86_FSW_TOP_MASK;
5391 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5392 pFpuCtx->FSW = uFSW;
5393
5394 /* Mark the previous ST0 as empty. */
5395 iOldTop >>= X86_FSW_TOP_SHIFT;
5396 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5397
5398 /* Rotate the registers. */
5399 iemFpuRotateStackPop(pFpuCtx);
5400}
5401
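/*
 * Editor's illustrative sketch (not part of the original source): a pop
 * increments TOP modulo 8; adding 9 (= 8 + 1) and masking gives the same
 * result as adding 1.  The code above does the equivalent arithmetic on the
 * shifted TOP field inside FSW.
 */
#if 0 /* example only */
uint16_t const iTop    = 7;
uint16_t const iNewTop = (iTop + 9) & X86_FSW_TOP_SMASK;   /* = 0, i.e. wraps around */
#endif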
5402
5403/**
5404 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5405 *
5406 * @param pIemCpu The IEM per CPU data.
5407 * @param pResult The FPU operation result to push.
5408 */
5409IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5410{
5411 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5412 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5413 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5414 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5415}
5416
5417
5418/**
5419 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5420 * and sets FPUDP and FPUDS.
5421 *
5422 * @param pIemCpu The IEM per CPU data.
5423 * @param pResult The FPU operation result to push.
5424 * @param iEffSeg The effective segment register.
5425 * @param GCPtrEff The effective address relative to @a iEffSeg.
5426 */
5427IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5428{
5429 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5430 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5431 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5432 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5433 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5434}
5435
5436
5437/**
5438 * Replace ST0 with the first value and push the second onto the FPU stack,
5439 * unless a pending exception prevents it.
5440 *
5441 * @param pIemCpu The IEM per CPU data.
5442 * @param pResult The FPU operation result to store and push.
5443 */
5444IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5445{
5446 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5447 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5448 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5449
5450 /* Update FSW and bail if there are pending exceptions afterwards. */
5451 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5452 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5453 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5454 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5455 {
5456 pFpuCtx->FSW = fFsw;
5457 return;
5458 }
5459
5460 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5461 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5462 {
5463 /* All is fine, push the actual value. */
5464 pFpuCtx->FTW |= RT_BIT(iNewTop);
5465 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5466 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5467 }
5468 else if (pFpuCtx->FCW & X86_FCW_IM)
5469 {
5470 /* Masked stack overflow, push QNaN. */
5471 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5472 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5473 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5474 }
5475 else
5476 {
5477 /* Raise stack overflow, don't push anything. */
5478 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5479 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5480 return;
5481 }
5482
5483 fFsw &= ~X86_FSW_TOP_MASK;
5484 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5485 pFpuCtx->FSW = fFsw;
5486
5487 iemFpuRotateStackPush(pFpuCtx);
5488}
5489
5490
5491/**
5492 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5493 * FOP.
5494 *
5495 * @param pIemCpu The IEM per CPU data.
5496 * @param pResult The result to store.
5497 * @param iStReg Which FPU register to store it in.
5499 */
5500IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5501{
5502 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5503 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5504 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5505 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5506}
5507
5508
5509/**
5510 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5511 * FOP, and then pops the stack.
5512 *
5513 * @param pIemCpu The IEM per CPU data.
5514 * @param pResult The result to store.
5515 * @param iStReg Which FPU register to store it in.
5517 */
5518IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5519{
5520 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5521 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5522 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5523 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5524 iemFpuMaybePopOne(pFpuCtx);
5525}
5526
5527
5528/**
5529 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5530 * FPUDP, and FPUDS.
5531 *
5532 * @param pIemCpu The IEM per CPU data.
5533 * @param pResult The result to store.
5534 * @param iStReg Which FPU register to store it in.
5536 * @param iEffSeg The effective memory operand selector register.
5537 * @param GCPtrEff The effective memory operand offset.
5538 */
5539IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5540{
5541 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5542 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5543 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5544 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5545 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5546}
5547
5548
5549/**
5550 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5551 * FPUDP, and FPUDS, and then pops the stack.
5552 *
5553 * @param pIemCpu The IEM per CPU data.
5554 * @param pResult The result to store.
5555 * @param iStReg Which FPU register to store it in.
5557 * @param iEffSeg The effective memory operand selector register.
5558 * @param GCPtrEff The effective memory operand offset.
5559 */
5560IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5561 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5562{
5563 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5564 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5565 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5566 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5567 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5568 iemFpuMaybePopOne(pFpuCtx);
5569}
5570
5571
5572/**
5573 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5574 *
5575 * @param pIemCpu The IEM per CPU data.
5576 */
5577IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5578{
5579 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5580 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5581 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5582}
5583
5584
5585/**
5586 * Marks the specified stack register as free (for FFREE).
5587 *
5588 * @param pIemCpu The IEM per CPU data.
5589 * @param iStReg The register to free.
5590 */
5591IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5592{
5593 Assert(iStReg < 8);
5594 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5595 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5596 pFpuCtx->FTW &= ~RT_BIT(iReg);
5597}
5598
5599
5600/**
5601 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5602 *
5603 * @param pIemCpu The IEM per CPU data.
5604 */
5605IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5606{
5607 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5608 uint16_t uFsw = pFpuCtx->FSW;
5609 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5610 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5611 uFsw &= ~X86_FSW_TOP_MASK;
5612 uFsw |= uTop;
5613 pFpuCtx->FSW = uFsw;
5614}
5615
5616
5617/**
5618 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5619 *
5620 * @param pIemCpu The IEM per CPU data.
5621 */
5622IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5623{
5624 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5625 uint16_t uFsw = pFpuCtx->FSW;
5626 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5627 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5628 uFsw &= ~X86_FSW_TOP_MASK;
5629 uFsw |= uTop;
5630 pFpuCtx->FSW = uFsw;
5631}
5632
5633
5634/**
5635 * Updates the FSW, FOP, FPUIP, and FPUCS.
5636 *
5637 * @param pIemCpu The IEM per CPU data.
5638 * @param u16FSW The FSW from the current instruction.
5639 */
5640IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5641{
5642 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5643 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5644 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5645 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5646}
5647
5648
5649/**
5650 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5651 *
5652 * @param pIemCpu The IEM per CPU data.
5653 * @param u16FSW The FSW from the current instruction.
5654 */
5655IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5656{
5657 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5658 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5659 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5660 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5661 iemFpuMaybePopOne(pFpuCtx);
5662}
5663
5664
5665/**
5666 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5667 *
5668 * @param pIemCpu The IEM per CPU data.
5669 * @param u16FSW The FSW from the current instruction.
5670 * @param iEffSeg The effective memory operand selector register.
5671 * @param GCPtrEff The effective memory operand offset.
5672 */
5673IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5674{
5675 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5676 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5677 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5678 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5679 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5680}
5681
5682
5683/**
5684 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5685 *
5686 * @param pIemCpu The IEM per CPU data.
5687 * @param u16FSW The FSW from the current instruction.
5688 */
5689IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5690{
5691 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5692 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5693 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5694 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5695 iemFpuMaybePopOne(pFpuCtx);
5696 iemFpuMaybePopOne(pFpuCtx);
5697}
5698
5699
5700/**
5701 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5702 *
5703 * @param pIemCpu The IEM per CPU data.
5704 * @param u16FSW The FSW from the current instruction.
5705 * @param iEffSeg The effective memory operand selector register.
5706 * @param GCPtrEff The effective memory operand offset.
5707 */
5708IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5709{
5710 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5711 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5712 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5713 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5714 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5715 iemFpuMaybePopOne(pFpuCtx);
5716}
5717
5718
5719/**
5720 * Worker routine for raising an FPU stack underflow exception.
5721 *
5722 * @param pIemCpu The IEM per CPU data.
5723 * @param pFpuCtx The FPU context.
5724 * @param iStReg The stack register being accessed.
5725 */
5726IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5727{
5728 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5729 if (pFpuCtx->FCW & X86_FCW_IM)
5730 {
5731 /* Masked underflow. */
5732 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5733 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5734 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5735 if (iStReg != UINT8_MAX)
5736 {
5737 pFpuCtx->FTW |= RT_BIT(iReg);
5738 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5739 }
5740 }
5741 else
5742 {
5743 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5744 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5745 }
5746}
5747
5748
5749/**
5750 * Raises a FPU stack underflow exception.
5751 *
5752 * @param pIemCpu The IEM per CPU data.
5753 * @param iStReg The destination register that should be loaded
5754 *                      with QNaN if \#IS is masked.  Specify
5755 * UINT8_MAX if none (like for fcom).
5756 */
5757DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5758{
5759 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5760 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5761 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5762 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5763}
5764
5765
5766DECL_NO_INLINE(IEM_STATIC, void)
5767iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5768{
5769 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5770 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5771 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5772 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5773 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5774}
5775
5776
5777DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5778{
5779 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5780 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5781 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5782 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5783 iemFpuMaybePopOne(pFpuCtx);
5784}
5785
5786
5787DECL_NO_INLINE(IEM_STATIC, void)
5788iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5789{
5790 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5791 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5792 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5793 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5794 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5795 iemFpuMaybePopOne(pFpuCtx);
5796}
5797
5798
5799DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5800{
5801 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5802 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5803 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5804 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5805 iemFpuMaybePopOne(pFpuCtx);
5806 iemFpuMaybePopOne(pFpuCtx);
5807}
5808
5809
5810DECL_NO_INLINE(IEM_STATIC, void)
5811iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5812{
5813 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5814 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5815 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5816
5817 if (pFpuCtx->FCW & X86_FCW_IM)
5818 {
5819        /* Masked underflow - Push QNaN. */
5820 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5821 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5822 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5823 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5824 pFpuCtx->FTW |= RT_BIT(iNewTop);
5825 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5826 iemFpuRotateStackPush(pFpuCtx);
5827 }
5828 else
5829 {
5830 /* Exception pending - don't change TOP or the register stack. */
5831 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5832 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5833 }
5834}
5835
5836
5837DECL_NO_INLINE(IEM_STATIC, void)
5838iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5839{
5840 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5841 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5842 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5843
5844 if (pFpuCtx->FCW & X86_FCW_IM)
5845 {
5846        /* Masked underflow - Push QNaN. */
5847 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5848 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5849 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5850 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5851 pFpuCtx->FTW |= RT_BIT(iNewTop);
5852 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5853 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5854 iemFpuRotateStackPush(pFpuCtx);
5855 }
5856 else
5857 {
5858 /* Exception pending - don't change TOP or the register stack. */
5859 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5860 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5861 }
5862}
5863
5864
5865/**
5866 * Worker routine for raising an FPU stack overflow exception on a push.
5867 *
5868 * @param pFpuCtx The FPU context.
5869 */
5870IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5871{
5872 if (pFpuCtx->FCW & X86_FCW_IM)
5873 {
5874 /* Masked overflow. */
5875 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5876 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5877 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5878 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5879 pFpuCtx->FTW |= RT_BIT(iNewTop);
5880 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5881 iemFpuRotateStackPush(pFpuCtx);
5882 }
5883 else
5884 {
5885 /* Exception pending - don't change TOP or the register stack. */
5886 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5887 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5888 }
5889}
5890
5891
5892/**
5893 * Raises a FPU stack overflow exception on a push.
5894 *
5895 * @param pIemCpu The IEM per CPU data.
5896 */
5897DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5898{
5899 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5900 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5901 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5902 iemFpuStackPushOverflowOnly(pFpuCtx);
5903}
5904
5905
5906/**
5907 * Raises a FPU stack overflow exception on a push with a memory operand.
5908 *
5909 * @param pIemCpu The IEM per CPU data.
5910 * @param iEffSeg The effective memory operand selector register.
5911 * @param GCPtrEff The effective memory operand offset.
5912 */
5913DECL_NO_INLINE(IEM_STATIC, void)
5914iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5915{
5916 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5917 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5918 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5919 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5920 iemFpuStackPushOverflowOnly(pFpuCtx);
5921}
5922
5923
5924IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5925{
5926 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5927 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5928 if (pFpuCtx->FTW & RT_BIT(iReg))
5929 return VINF_SUCCESS;
5930 return VERR_NOT_FOUND;
5931}
5932
5933
5934IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5935{
5936 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5937 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5938 if (pFpuCtx->FTW & RT_BIT(iReg))
5939 {
5940 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5941 return VINF_SUCCESS;
5942 }
5943 return VERR_NOT_FOUND;
5944}
5945
5946
5947IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5948 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5949{
5950 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5951 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5952 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5953 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5954 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5955 {
5956 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5957 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
5958 return VINF_SUCCESS;
5959 }
5960 return VERR_NOT_FOUND;
5961}
5962
5963
5964IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5965{
5966 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5967 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5968 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5969 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5970 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5971 {
5972 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5973 return VINF_SUCCESS;
5974 }
5975 return VERR_NOT_FOUND;
5976}
5977
5978
5979/**
5980 * Updates the FPU exception status after FCW is changed.
5981 *
5982 * @param pFpuCtx The FPU context.
5983 */
5984IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
5985{
5986 uint16_t u16Fsw = pFpuCtx->FSW;
5987 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
5988 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5989 else
5990 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5991 pFpuCtx->FSW = u16Fsw;
5992}
5993
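/*
 * Editor's illustrative sketch (not part of the original source): ES and B
 * are set whenever an exception flag in FSW is not masked by FCW, e.g. IE
 * pending while IM is clear.
 */
#if 0 /* example only */
uint16_t const fFcw     = X86_FCW_ZM | X86_FCW_DM;      /* IM not set -> #IE unmasked */
uint16_t const fFsw     = X86_FSW_IE;
bool     const fSummary = RT_BOOL((fFsw & X86_FSW_XCPT_MASK) & ~(fFcw & X86_FCW_XCPT_MASK)); /* true */
#endif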
5994
5995/**
5996 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
5997 *
5998 * @returns The full FTW.
5999 * @param pFpuCtx The FPU context.
6000 */
6001IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6002{
6003 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6004 uint16_t u16Ftw = 0;
6005 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6006 for (unsigned iSt = 0; iSt < 8; iSt++)
6007 {
6008 unsigned const iReg = (iSt + iTop) & 7;
6009 if (!(u8Ftw & RT_BIT(iReg)))
6010 u16Ftw |= 3 << (iReg * 2); /* empty */
6011 else
6012 {
6013 uint16_t uTag;
6014 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6015 if (pr80Reg->s.uExponent == 0x7fff)
6016 uTag = 2; /* Exponent is all 1's => Special. */
6017 else if (pr80Reg->s.uExponent == 0x0000)
6018 {
6019 if (pr80Reg->s.u64Mantissa == 0x0000)
6020 uTag = 1; /* All bits are zero => Zero. */
6021 else
6022 uTag = 2; /* Must be special. */
6023 }
6024 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6025 uTag = 0; /* Valid. */
6026 else
6027 uTag = 2; /* Must be special. */
6028
6029            u16Ftw |= uTag << (iReg * 2);
6030 }
6031 }
6032
6033 return u16Ftw;
6034}
6035
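/*
 * Editor's illustrative sketch (not part of the original source): the two
 * bit tags are 0=valid, 1=zero, 2=special (NaN, denormal, etc.), 3=empty;
 * physical register i occupies bits 2*i..2*i+1 of the full tag word.
 */
#if 0 /* example only */
uint16_t u16Ftw = 0;
u16Ftw |= 3 << (5 * 2);     /* register 5 is empty           */
u16Ftw |= 1 << (2 * 2);     /* register 2 holds +0.0 or -0.0 */
/* the remaining registers keep tag 0, i.e. valid            */
#endif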
6036
6037/**
6038 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6039 *
6040 * @returns The compressed FTW.
6041 * @param u16FullFtw The full FTW to convert.
6042 */
6043IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6044{
6045 uint8_t u8Ftw = 0;
6046 for (unsigned i = 0; i < 8; i++)
6047 {
6048 if ((u16FullFtw & 3) != 3 /*empty*/)
6049 u8Ftw |= RT_BIT(i);
6050 u16FullFtw >>= 2;
6051 }
6052
6053 return u8Ftw;
6054}
6055
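/*
 * Editor's illustrative sketch (not part of the original source): compression
 * keeps one bit per register - set for anything not tagged empty.
 */
#if 0 /* example only */
/* Register 0 empty (tag 11), registers 1-7 in use (tag 00): only bit 0 ends up clear. */
uint16_t const u16Full     = UINT16_C(0x0003);
uint16_t const uCompressed = iemFpuCompressFtw(u16Full);    /* = 0xfe */
#endif
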
6056/** @} */
6057
6058
6059/** @name Memory access.
6060 *
6061 * @{
6062 */
6063
6064
6065/**
6066 * Updates the IEMCPU::cbWritten counter if applicable.
6067 *
6068 * @param pIemCpu The IEM per CPU data.
6069 * @param fAccess The access being accounted for.
6070 * @param cbMem The access size.
6071 */
6072DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6073{
6074 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6075 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6076 pIemCpu->cbWritten += (uint32_t)cbMem;
6077}
6078
6079
6080/**
6081 * Checks if the given segment can be written to, raising the appropriate
6082 * exception if not.
6083 *
6084 * @returns VBox strict status code.
6085 *
6086 * @param pIemCpu The IEM per CPU data.
6087 * @param pHid Pointer to the hidden register.
6088 * @param iSegReg The register number.
6089 * @param pu64BaseAddr Where to return the base address to use for the
6090 * segment. (In 64-bit code it may differ from the
6091 * base in the hidden segment.)
6092 */
6093IEM_STATIC VBOXSTRICTRC
6094iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6095{
6096 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6097 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6098 else
6099 {
6100 if (!pHid->Attr.n.u1Present)
6101 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6102
6103 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6104 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6105 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6106 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6107 *pu64BaseAddr = pHid->u64Base;
6108 }
6109 return VINF_SUCCESS;
6110}
6111
6112
6113/**
6114 * Checks if the given segment can be read from, raising the appropriate
6115 * exception if not.
6116 *
6117 * @returns VBox strict status code.
6118 *
6119 * @param pIemCpu The IEM per CPU data.
6120 * @param pHid Pointer to the hidden register.
6121 * @param iSegReg The register number.
6122 * @param pu64BaseAddr Where to return the base address to use for the
6123 * segment. (In 64-bit code it may differ from the
6124 * base in the hidden segment.)
6125 */
6126IEM_STATIC VBOXSTRICTRC
6127iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6128{
6129 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6130 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6131 else
6132 {
6133 if (!pHid->Attr.n.u1Present)
6134 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6135
6136 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6137 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6138 *pu64BaseAddr = pHid->u64Base;
6139 }
6140 return VINF_SUCCESS;
6141}
6142
6143
6144/**
6145 * Applies the segment limit, base and attributes.
6146 *
6147 * This may raise a \#GP or \#SS.
6148 *
6149 * @returns VBox strict status code.
6150 *
6151 * @param pIemCpu The IEM per CPU data.
6152 * @param fAccess The kind of access which is being performed.
6153 * @param iSegReg The index of the segment register to apply.
6154 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6155 * TSS, ++).
6156 * @param pGCPtrMem Pointer to the guest memory address to apply
6157 * segmentation to. Input and output parameter.
6158 */
6159IEM_STATIC VBOXSTRICTRC
6160iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6161{
6162 if (iSegReg == UINT8_MAX)
6163 return VINF_SUCCESS;
6164
6165 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6166 switch (pIemCpu->enmCpuMode)
6167 {
6168 case IEMMODE_16BIT:
6169 case IEMMODE_32BIT:
6170 {
6171 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6172 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6173
6174 Assert(pSel->Attr.n.u1Present);
6175 Assert(pSel->Attr.n.u1DescType);
6176 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6177 {
6178 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6179 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6180 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6181
6182 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6183 {
6184 /** @todo CPL check. */
6185 }
6186
6187 /*
6188 * There are two kinds of data selectors, normal and expand down.
6189 */
6190 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6191 {
6192 if ( GCPtrFirst32 > pSel->u32Limit
6193 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6194 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6195 }
6196 else
6197 {
6198 /*
6199 * The upper boundary is defined by the B bit, not the G bit!
6200 */
6201 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6202 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6203 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6204 }
6205 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6206 }
6207 else
6208 {
6209
6210 /*
6211             * A code selector can usually be used to read through; writing is
6212             * only permitted in real and V8086 mode.
6213 */
6214 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6215 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6216 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6217 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6218 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6219
6220 if ( GCPtrFirst32 > pSel->u32Limit
6221 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6222 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6223
6224 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6225 {
6226 /** @todo CPL check. */
6227 }
6228
6229 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6230 }
6231 return VINF_SUCCESS;
6232 }
6233
6234 case IEMMODE_64BIT:
6235 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6236 *pGCPtrMem += pSel->u64Base;
6237 return VINF_SUCCESS;
6238
6239 default:
6240 AssertFailedReturn(VERR_IEM_IPE_7);
6241 }
6242}
6243
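/*
 * Editor's illustrative sketch (not part of the original source): for an
 * expand-down data segment with limit 0x0fff and B=1, valid offsets are
 * 0x1000 through 0xffffffff; an access at offset 0x0800 fails the bounds
 * check above.  Made-up example values.
 */
#if 0 /* example only */
uint32_t const uLimit       = UINT32_C(0x0fff);
uint32_t const GCPtrFirst32 = UINT32_C(0x0800);
bool     const fOutOfBounds = GCPtrFirst32 < uLimit + UINT32_C(1);   /* true -> raises #GP or #SS */
#endif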
6244
6245/**
6246 * Translates a virtual address to a physical address and checks if we
6247 * can access the page as specified.
6248 *
6249 * @param pIemCpu The IEM per CPU data.
6250 * @param GCPtrMem The virtual address.
6251 * @param fAccess The intended access.
6252 * @param pGCPhysMem Where to return the physical address.
6253 */
6254IEM_STATIC VBOXSTRICTRC
6255iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6256{
6257 /** @todo Need a different PGM interface here. We're currently using
6258 * generic / REM interfaces. this won't cut it for R0 & RC. */
6259 RTGCPHYS GCPhys;
6260 uint64_t fFlags;
6261 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6262 if (RT_FAILURE(rc))
6263 {
6264 /** @todo Check unassigned memory in unpaged mode. */
6265 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6266 *pGCPhysMem = NIL_RTGCPHYS;
6267 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6268 }
6269
6270 /* If the page is writable and does not have the no-exec bit set, all
6271 access is allowed. Otherwise we'll have to check more carefully... */
6272 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6273 {
6274 /* Write to read only memory? */
6275 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6276 && !(fFlags & X86_PTE_RW)
6277 && ( pIemCpu->uCpl != 0
6278 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6279 {
6280 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6281 *pGCPhysMem = NIL_RTGCPHYS;
6282 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6283 }
6284
6285 /* Kernel memory accessed by userland? */
6286 if ( !(fFlags & X86_PTE_US)
6287 && pIemCpu->uCpl == 3
6288 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6289 {
6290 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6291 *pGCPhysMem = NIL_RTGCPHYS;
6292 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6293 }
6294
6295 /* Executing non-executable memory? */
6296 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6297 && (fFlags & X86_PTE_PAE_NX)
6298 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6299 {
6300 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6301 *pGCPhysMem = NIL_RTGCPHYS;
6302 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6303 VERR_ACCESS_DENIED);
6304 }
6305 }
6306
6307 /*
6308 * Set the dirty / access flags.
6309     * ASSUMES this is set when the address is translated rather than on commit...
6310 */
6311 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6312 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6313 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6314 {
6315 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6316 AssertRC(rc2);
6317 }
6318
6319 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6320 *pGCPhysMem = GCPhys;
6321 return VINF_SUCCESS;
6322}
6323
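/*
 * Editor's illustrative sketch (not part of the original source): the
 * returned physical address combines the page frame from the PTE with the
 * low 12 bits of the virtual address.  The numbers are made-up examples.
 */
#if 0 /* example only */
RTGCPHYS const GCPhysFrame = UINT64_C(0x0000000000123000);
RTGCPTR  const GCPtrMemEx  = UINT64_C(0x0000000080104567);
RTGCPHYS const GCPhysFull  = GCPhysFrame | (GCPtrMemEx & PAGE_OFFSET_MASK);   /* = 0x123567 */
#endif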
6324
6325
6326/**
6327 * Maps a physical page.
6328 *
6329 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6330 * @param pIemCpu The IEM per CPU data.
6331 * @param GCPhysMem The physical address.
6332 * @param fAccess The intended access.
6333 * @param ppvMem Where to return the mapping address.
6334 * @param pLock The PGM lock.
6335 */
6336IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6337{
6338#ifdef IEM_VERIFICATION_MODE_FULL
6339 /* Force the alternative path so we can ignore writes. */
6340 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6341 {
6342 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6343 {
6344 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6345 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6346 if (RT_FAILURE(rc2))
6347 pIemCpu->fProblematicMemory = true;
6348 }
6349 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6350 }
6351#endif
6352#ifdef IEM_LOG_MEMORY_WRITES
6353 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6354 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6355#endif
6356#ifdef IEM_VERIFICATION_MODE_MINIMAL
6357 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6358#endif
6359
6360 /** @todo This API may require some improving later. A private deal with PGM
6361     *        regarding locking and unlocking needs to be struck.  A couple of TLBs
6362 * living in PGM, but with publicly accessible inlined access methods
6363 * could perhaps be an even better solution. */
6364 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6365 GCPhysMem,
6366 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6367 pIemCpu->fBypassHandlers,
6368 ppvMem,
6369 pLock);
6370 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6371 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6372
6373#ifdef IEM_VERIFICATION_MODE_FULL
6374 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6375 pIemCpu->fProblematicMemory = true;
6376#endif
6377 return rc;
6378}
6379
6380
6381/**
6382 * Unmap a page previously mapped by iemMemPageMap.
6383 *
6384 * @param pIemCpu The IEM per CPU data.
6385 * @param GCPhysMem The physical address.
6386 * @param fAccess The intended access.
6387 * @param pvMem What iemMemPageMap returned.
6388 * @param pLock The PGM lock.
6389 */
6390DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6391{
6392 NOREF(pIemCpu);
6393 NOREF(GCPhysMem);
6394 NOREF(fAccess);
6395 NOREF(pvMem);
6396 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6397}
6398
6399
6400/**
6401 * Looks up a memory mapping entry.
6402 *
6403 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6404 * @param pIemCpu The IEM per CPU data.
6405 * @param pvMem The memory address.
6406 * @param   fAccess             The access to match.
6407 */
6408DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6409{
6410 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6411 if ( pIemCpu->aMemMappings[0].pv == pvMem
6412 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6413 return 0;
6414 if ( pIemCpu->aMemMappings[1].pv == pvMem
6415 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6416 return 1;
6417 if ( pIemCpu->aMemMappings[2].pv == pvMem
6418 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6419 return 2;
6420 return VERR_NOT_FOUND;
6421}
6422
6423
6424/**
6425 * Finds a free memmap entry when using iNextMapping doesn't work.
6426 *
6427 * @returns Memory mapping index, 1024 on failure.
6428 * @param pIemCpu The IEM per CPU data.
6429 */
6430IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6431{
6432 /*
6433 * The easy case.
6434 */
6435 if (pIemCpu->cActiveMappings == 0)
6436 {
6437 pIemCpu->iNextMapping = 1;
6438 return 0;
6439 }
6440
6441 /* There should be enough mappings for all instructions. */
6442 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6443
6444 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6445 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6446 return i;
6447
6448 AssertFailedReturn(1024);
6449}
6450
6451
6452/**
6453 * Commits a bounce buffer that needs writing back and unmaps it.
6454 *
6455 * @returns Strict VBox status code.
6456 * @param pIemCpu The IEM per CPU data.
6457 * @param iMemMap The index of the buffer to commit.
6458 */
6459IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6460{
6461 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6462 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6463
6464 /*
6465 * Do the writing.
6466 */
6467#ifndef IEM_VERIFICATION_MODE_MINIMAL
6468 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6469 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6470 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6471 {
6472 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6473 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6474 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6475 if (!pIemCpu->fBypassHandlers)
6476 {
6477 /*
6478 * Carefully and efficiently dealing with access handler return
6479 * codes makes this a little bloated.
6480 */
6481 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6482 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6483 pbBuf,
6484 cbFirst,
6485 PGMACCESSORIGIN_IEM);
6486 if (rcStrict == VINF_SUCCESS)
6487 {
6488 if (cbSecond)
6489 {
6490 rcStrict = PGMPhysWrite(pVM,
6491 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6492 pbBuf + cbFirst,
6493 cbSecond,
6494 PGMACCESSORIGIN_IEM);
6495 if (rcStrict == VINF_SUCCESS)
6496 { /* nothing */ }
6497 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6498 {
6499 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6500 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6501 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6502 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6503 }
6504 else
6505 {
6506 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6507 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6508 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6509 return rcStrict;
6510 }
6511 }
6512 }
6513 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6514 {
6515 if (!cbSecond)
6516 {
6517 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6518 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6519 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6520 }
6521 else
6522 {
6523 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6524 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6525 pbBuf + cbFirst,
6526 cbSecond,
6527 PGMACCESSORIGIN_IEM);
6528 if (rcStrict2 == VINF_SUCCESS)
6529 {
6530 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6531 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6532 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6533 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6534 }
6535 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6536 {
6537 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6538 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6539 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6540 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6541 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6542 }
6543 else
6544 {
6545 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6546 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6547 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6548 return rcStrict2;
6549 }
6550 }
6551 }
6552 else
6553 {
6554 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6555 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6556 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6557 return rcStrict;
6558 }
6559 }
6560 else
6561 {
6562 /*
6563 * No access handlers, much simpler.
6564 */
6565 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6566 if (RT_SUCCESS(rc))
6567 {
6568 if (cbSecond)
6569 {
6570 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6571 if (RT_SUCCESS(rc))
6572 { /* likely */ }
6573 else
6574 {
6575 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6576 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6577 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6578 return rc;
6579 }
6580 }
6581 }
6582 else
6583 {
6584 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6585 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6586 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6587 return rc;
6588 }
6589 }
6590 }
6591#endif
6592
6593#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6594 /*
6595 * Record the write(s).
6596 */
6597 if (!pIemCpu->fNoRem)
6598 {
6599 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6600 if (pEvtRec)
6601 {
6602 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6603 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6604 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6605 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6606 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6607 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6608 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6609 }
6610 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6611 {
6612 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6613 if (pEvtRec)
6614 {
6615 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6616 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6617 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6618 memcpy(pEvtRec->u.RamWrite.ab,
6619 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6620 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6621 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6622 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6623 }
6624 }
6625 }
6626#endif
6627#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6628 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6629 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6630 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6631 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6632 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6633 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6634
6635 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6636 g_cbIemWrote = cbWrote;
6637 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6638#endif
6639
6640 /*
6641 * Free the mapping entry.
6642 */
6643 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6644 Assert(pIemCpu->cActiveMappings != 0);
6645 pIemCpu->cActiveMappings--;
6646 return VINF_SUCCESS;
6647}
6648
6649
6650/**
6651 * iemMemMap worker that deals with a request crossing pages.
6652 */
6653IEM_STATIC VBOXSTRICTRC
6654iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6655{
6656 /*
6657 * Do the address translations.
6658 */
6659 RTGCPHYS GCPhysFirst;
6660 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6661 if (rcStrict != VINF_SUCCESS)
6662 return rcStrict;
6663
6664/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6665 * last byte. */
6666 RTGCPHYS GCPhysSecond;
6667 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6668 if (rcStrict != VINF_SUCCESS)
6669 return rcStrict;
6670 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6671
6672 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6673#ifdef IEM_VERIFICATION_MODE_FULL
6674 /*
6675 * Detect problematic memory when verifying so we can select
6676 * the right execution engine. (TLB: Redo this.)
6677 */
6678 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6679 {
6680 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6681 if (RT_SUCCESS(rc2))
6682 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6683 if (RT_FAILURE(rc2))
6684 pIemCpu->fProblematicMemory = true;
6685 }
6686#endif
6687
6688
6689 /*
6690 * Read in the current memory content if it's a read, execute or partial
6691 * write access.
6692 */
6693 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6694 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6695 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6696
6697 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6698 {
6699 if (!pIemCpu->fBypassHandlers)
6700 {
6701 /*
6702 * Must carefully deal with access handler status codes here,
6703 * which makes the code a bit bloated.
6704 */
6705 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6706 if (rcStrict == VINF_SUCCESS)
6707 {
6708 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6709 if (rcStrict == VINF_SUCCESS)
6710 { /*likely */ }
6711 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6712 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6713 else
6714 {
6715 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6716 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6717 return rcStrict;
6718 }
6719 }
6720 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6721 {
6722 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6723 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6724 {
6725 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6726 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6727 }
6728 else
6729 {
6730 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6731 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6732 return rcStrict2;
6733 }
6734 }
6735 else
6736 {
6737 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6738 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6739 return rcStrict;
6740 }
6741 }
6742 else
6743 {
6744 /*
6745 * No informational status codes here, much more straightforward.
6746 */
6747 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6748 if (RT_SUCCESS(rc))
6749 {
6750 Assert(rc == VINF_SUCCESS);
6751 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6752 if (RT_SUCCESS(rc))
6753 Assert(rc == VINF_SUCCESS);
6754 else
6755 {
6756 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6757 return rc;
6758 }
6759 }
6760 else
6761 {
6762 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6763 return rc;
6764 }
6765 }
6766
6767#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6768 if ( !pIemCpu->fNoRem
6769 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6770 {
6771 /*
6772 * Record the reads.
6773 */
6774 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6775 if (pEvtRec)
6776 {
6777 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6778 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6779 pEvtRec->u.RamRead.cb = cbFirstPage;
6780 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6781 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6782 }
6783 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6784 if (pEvtRec)
6785 {
6786 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6787 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6788 pEvtRec->u.RamRead.cb = cbSecondPage;
6789 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6790 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6791 }
6792 }
6793#endif
6794 }
6795#ifdef VBOX_STRICT
6796 else
6797 memset(pbBuf, 0xcc, cbMem);
6798 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6799 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6800#endif
6801
6802 /*
6803 * Commit the bounce buffer entry.
6804 */
6805 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6806 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6807 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6808 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6809 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6810 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6811 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6812 pIemCpu->iNextMapping = iMemMap + 1;
6813 pIemCpu->cActiveMappings++;
6814
6815 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6816 *ppvMem = pbBuf;
6817 return VINF_SUCCESS;
6818}
6819
6820
6821/**
6822 * iemMemMap worker that deals with iemMemPageMap failures.
6823 */
6824IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6825 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6826{
6827 /*
6828 * Filter out conditions we can handle and the ones which shouldn't happen.
6829 */
6830 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6831 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6832 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6833 {
6834 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6835 return rcMap;
6836 }
6837 pIemCpu->cPotentialExits++;
6838
6839 /*
6840 * Read in the current memory content if it's a read, execute or partial
6841 * write access.
6842 */
6843 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6844 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6845 {
6846 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6847 memset(pbBuf, 0xff, cbMem);
6848 else
6849 {
6850 int rc;
6851 if (!pIemCpu->fBypassHandlers)
6852 {
6853 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6854 if (rcStrict == VINF_SUCCESS)
6855 { /* nothing */ }
6856 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6857 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6858 else
6859 {
6860 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6861 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6862 return rcStrict;
6863 }
6864 }
6865 else
6866 {
6867 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6868 if (RT_SUCCESS(rc))
6869 { /* likely */ }
6870 else
6871 {
6872 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6873 GCPhysFirst, rc));
6874 return rc;
6875 }
6876 }
6877 }
6878
6879#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6880 if ( !pIemCpu->fNoRem
6881 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6882 {
6883 /*
6884 * Record the read.
6885 */
6886 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6887 if (pEvtRec)
6888 {
6889 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6890 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6891 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6892 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6893 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6894 }
6895 }
6896#endif
6897 }
6898#ifdef VBOX_STRICT
6899 else
6900 memset(pbBuf, 0xcc, cbMem);
6901#endif
6902#ifdef VBOX_STRICT
6903 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6904 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6905#endif
6906
6907 /*
6908 * Commit the bounce buffer entry.
6909 */
6910 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6911 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6912 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6913 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6914 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6915 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6916 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6917 pIemCpu->iNextMapping = iMemMap + 1;
6918 pIemCpu->cActiveMappings++;
6919
6920 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6921 *ppvMem = pbBuf;
6922 return VINF_SUCCESS;
6923}
6924
6925
6926
6927/**
6928 * Maps the specified guest memory for the given kind of access.
6929 *
6930 * This may be using bounce buffering of the memory if it's crossing a page
6931 * boundary or if there is an access handler installed for any of it. Because
6932 * of lock prefix guarantees, we're in for some extra clutter when this
6933 * happens.
6934 *
6935 * This may raise a \#GP, \#SS, \#PF or \#AC.
6936 *
6937 * @returns VBox strict status code.
6938 *
6939 * @param pIemCpu The IEM per CPU data.
6940 * @param ppvMem Where to return the pointer to the mapped
6941 * memory.
6942 * @param cbMem The number of bytes to map. This is usually 1,
6943 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6944 * string operations it can be up to a page.
6945 * @param iSegReg The index of the segment register to use for
6946 * this access. The base and limits are checked.
6947 * Use UINT8_MAX to indicate that no segmentation
6948 * is required (for IDT, GDT and LDT accesses).
6949 * @param GCPtrMem The address of the guest memory.
6950 * @param fAccess How the memory is being accessed. The
6951 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6952 * how to map the memory, while the
6953 * IEM_ACCESS_WHAT_XXX bit is used when raising
6954 * exceptions.
6955 */
6956IEM_STATIC VBOXSTRICTRC
6957iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6958{
6959 /*
6960 * Check the input and figure out which mapping entry to use.
6961 */
6962 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6963 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6964
6965 unsigned iMemMap = pIemCpu->iNextMapping;
6966 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6967 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6968 {
6969 iMemMap = iemMemMapFindFree(pIemCpu);
6970 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
6971 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
6972 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
6973 pIemCpu->aMemMappings[2].fAccess),
6974 VERR_IEM_IPE_9);
6975 }
6976
6977 /*
6978 * Map the memory, checking that we can actually access it. If something
6979 * slightly complicated happens, fall back on bounce buffering.
6980 */
6981 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6982 if (rcStrict != VINF_SUCCESS)
6983 return rcStrict;
6984
6985 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6986 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6987
6988 RTGCPHYS GCPhysFirst;
6989 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6990 if (rcStrict != VINF_SUCCESS)
6991 return rcStrict;
6992
6993 void *pvMem;
6994 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6995 if (rcStrict != VINF_SUCCESS)
6996 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6997
6998 /*
6999 * Fill in the mapping table entry.
7000 */
7001 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7002 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7003 pIemCpu->iNextMapping = iMemMap + 1;
7004 pIemCpu->cActiveMappings++;
7005
7006 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7007 *ppvMem = pvMem;
7008 return VINF_SUCCESS;
7009}
7010
7011
7012/**
7013 * Commits the guest memory if bounce buffered and unmaps it.
7014 *
7015 * @returns Strict VBox status code.
7016 * @param pIemCpu The IEM per CPU data.
7017 * @param pvMem The mapping.
7018 * @param fAccess The kind of access.
7019 */
7020IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7021{
7022 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7023 AssertReturn(iMemMap >= 0, iMemMap);
7024
7025 /* If it's bounce buffered, we may need to write back the buffer. */
7026 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7027 {
7028 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7029 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7030 }
7031 /* Otherwise unlock it. */
7032 else
7033 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7034
7035 /* Free the entry. */
7036 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7037 Assert(pIemCpu->cActiveMappings != 0);
7038 pIemCpu->cActiveMappings--;
7039 return VINF_SUCCESS;
7040}
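
/*
 * Illustrative only, not part of the build: the usual map/modify/commit
 * pattern built on iemMemMap and iemMemCommitAndUnmap, here for a simple
 * 16-bit data write.  GCPtrMem and u16Value stand in for the effective
 * address and value supplied by the caller; this mirrors what the
 * iemMemStoreData* helpers further down do.
 *
 *     uint16_t *pu16Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu16Dst = u16Value;
 *         rcStrict = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *     }
 *     return rcStrict; // informational statuses and raised exceptions are passed up as-is
 */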
7041
7042
7043/**
7044 * Rolls back mappings, releasing page locks and such.
7045 *
7046 * The caller shall only call this after checking cActiveMappings.
7047 *
7049 * @param pIemCpu The IEM per CPU data.
7050 */
7051IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7052{
7053 Assert(pIemCpu->cActiveMappings > 0);
7054
7055 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7056 while (iMemMap-- > 0)
7057 {
7058 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7059 if (fAccess != IEM_ACCESS_INVALID)
7060 {
7061 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7062 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7063 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7064 Assert(pIemCpu->cActiveMappings > 0);
7065 pIemCpu->cActiveMappings--;
7066 }
7067 }
7068}
7069
7070
7071/**
7072 * Fetches a data byte.
7073 *
7074 * @returns Strict VBox status code.
7075 * @param pIemCpu The IEM per CPU data.
7076 * @param pu8Dst Where to return the byte.
7077 * @param iSegReg The index of the segment register to use for
7078 * this access. The base and limits are checked.
7079 * @param GCPtrMem The address of the guest memory.
7080 */
7081IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7082{
7083 /* The lazy approach for now... */
7084 uint8_t const *pu8Src;
7085 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7086 if (rc == VINF_SUCCESS)
7087 {
7088 *pu8Dst = *pu8Src;
7089 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7090 }
7091 return rc;
7092}
7093
7094
7095/**
7096 * Fetches a data word.
7097 *
7098 * @returns Strict VBox status code.
7099 * @param pIemCpu The IEM per CPU data.
7100 * @param pu16Dst Where to return the word.
7101 * @param iSegReg The index of the segment register to use for
7102 * this access. The base and limits are checked.
7103 * @param GCPtrMem The address of the guest memory.
7104 */
7105IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7106{
7107 /* The lazy approach for now... */
7108 uint16_t const *pu16Src;
7109 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7110 if (rc == VINF_SUCCESS)
7111 {
7112 *pu16Dst = *pu16Src;
7113 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7114 }
7115 return rc;
7116}
7117
7118
7119/**
7120 * Fetches a data dword.
7121 *
7122 * @returns Strict VBox status code.
7123 * @param pIemCpu The IEM per CPU data.
7124 * @param pu32Dst Where to return the dword.
7125 * @param iSegReg The index of the segment register to use for
7126 * this access. The base and limits are checked.
7127 * @param GCPtrMem The address of the guest memory.
7128 */
7129IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7130{
7131 /* The lazy approach for now... */
7132 uint32_t const *pu32Src;
7133 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7134 if (rc == VINF_SUCCESS)
7135 {
7136 *pu32Dst = *pu32Src;
7137 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7138 }
7139 return rc;
7140}
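
/*
 * Illustrative only, not part of the build: how an instruction implementation
 * typically consumes one of the iemMemFetchData* helpers.  GCPtrEffSrc is a
 * stand-in for whatever effective address the decoder supplies.
 *
 *     uint32_t u32Src;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, &u32Src, X86_SREG_DS, GCPtrEffSrc);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // any #PF/#GP/#SS has already been raised by the helper
 *     // ... use u32Src as the source operand ...
 */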
7141
7142
7143#ifdef SOME_UNUSED_FUNCTION
7144/**
7145 * Fetches a data dword and sign extends it to a qword.
7146 *
7147 * @returns Strict VBox status code.
7148 * @param pIemCpu The IEM per CPU data.
7149 * @param pu64Dst Where to return the sign extended value.
7150 * @param iSegReg The index of the segment register to use for
7151 * this access. The base and limits are checked.
7152 * @param GCPtrMem The address of the guest memory.
7153 */
7154IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7155{
7156 /* The lazy approach for now... */
7157 int32_t const *pi32Src;
7158 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7159 if (rc == VINF_SUCCESS)
7160 {
7161 *pu64Dst = *pi32Src;
7162 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7163 }
7164#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7165 else
7166 *pu64Dst = 0;
7167#endif
7168 return rc;
7169}
7170#endif
7171
7172
7173/**
7174 * Fetches a data qword.
7175 *
7176 * @returns Strict VBox status code.
7177 * @param pIemCpu The IEM per CPU data.
7178 * @param pu64Dst Where to return the qword.
7179 * @param iSegReg The index of the segment register to use for
7180 * this access. The base and limits are checked.
7181 * @param GCPtrMem The address of the guest memory.
7182 */
7183IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7184{
7185 /* The lazy approach for now... */
7186 uint64_t const *pu64Src;
7187 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7188 if (rc == VINF_SUCCESS)
7189 {
7190 *pu64Dst = *pu64Src;
7191 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7192 }
7193 return rc;
7194}
7195
7196
7197/**
7198 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7199 *
7200 * @returns Strict VBox status code.
7201 * @param pIemCpu The IEM per CPU data.
7202 * @param pu64Dst Where to return the qword.
7203 * @param iSegReg The index of the segment register to use for
7204 * this access. The base and limits are checked.
7205 * @param GCPtrMem The address of the guest memory.
7206 */
7207IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7208{
7209 /* The lazy approach for now... */
7210 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7211 if (RT_UNLIKELY(GCPtrMem & 15))
7212 return iemRaiseGeneralProtectionFault0(pIemCpu);
7213
7214 uint64_t const *pu64Src;
7215 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7216 if (rc == VINF_SUCCESS)
7217 {
7218 *pu64Dst = *pu64Src;
7219 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7220 }
7221 return rc;
7222}
7223
7224
7225/**
7226 * Fetches a data tword.
7227 *
7228 * @returns Strict VBox status code.
7229 * @param pIemCpu The IEM per CPU data.
7230 * @param pr80Dst Where to return the tword.
7231 * @param iSegReg The index of the segment register to use for
7232 * this access. The base and limits are checked.
7233 * @param GCPtrMem The address of the guest memory.
7234 */
7235IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7236{
7237 /* The lazy approach for now... */
7238 PCRTFLOAT80U pr80Src;
7239 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7240 if (rc == VINF_SUCCESS)
7241 {
7242 *pr80Dst = *pr80Src;
7243 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7244 }
7245 return rc;
7246}
7247
7248
7249/**
7250 * Fetches a data dqword (double qword), generally SSE related.
7251 *
7252 * @returns Strict VBox status code.
7253 * @param pIemCpu The IEM per CPU data.
7254 * @param pu128Dst Where to return the dqword.
7255 * @param iSegReg The index of the segment register to use for
7256 * this access. The base and limits are checked.
7257 * @param GCPtrMem The address of the guest memory.
7258 */
7259IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7260{
7261 /* The lazy approach for now... */
7262 uint128_t const *pu128Src;
7263 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7264 if (rc == VINF_SUCCESS)
7265 {
7266 *pu128Dst = *pu128Src;
7267 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7268 }
7269 return rc;
7270}
7271
7272
7273/**
7274 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7275 * related.
7276 *
7277 * Raises \#GP(0) if not aligned.
7278 *
7279 * @returns Strict VBox status code.
7280 * @param pIemCpu The IEM per CPU data.
7281 * @param pu128Dst Where to return the dqword.
7282 * @param iSegReg The index of the segment register to use for
7283 * this access. The base and limits are checked.
7284 * @param GCPtrMem The address of the guest memory.
7285 */
7286IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7287{
7288 /* The lazy approach for now... */
7289 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7290 if ( (GCPtrMem & 15)
7291 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7292 return iemRaiseGeneralProtectionFault0(pIemCpu);
7293
7294 uint128_t const *pu128Src;
7295 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7296 if (rc == VINF_SUCCESS)
7297 {
7298 *pu128Dst = *pu128Src;
7299 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7300 }
7301 return rc;
7302}
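
/*
 * Illustrative only, not part of the build: a movdqa-style caller can lean on
 * the aligned fetch above for its alignment check; a misaligned address takes
 * the #GP(0) path unless MXCSR.MM permits misaligned SSE accesses.
 * GCPtrEffSrc is a made-up local.
 *
 *     uint128_t uSrc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU128AlignedSse(pIemCpu, &uSrc, X86_SREG_DS, GCPtrEffSrc);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // includes the misalignment #GP(0) case
 */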
7303
7304
7305
7306
7307/**
7308 * Fetches a descriptor register (lgdt, lidt).
7309 *
7310 * @returns Strict VBox status code.
7311 * @param pIemCpu The IEM per CPU data.
7312 * @param pcbLimit Where to return the limit.
7313 * @param pGCPtrBase Where to return the base.
7314 * @param iSegReg The index of the segment register to use for
7315 * this access. The base and limits are checked.
7316 * @param GCPtrMem The address of the guest memory.
7317 * @param enmOpSize The effective operand size.
7318 */
7319IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7320 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7321{
7322 uint8_t const *pu8Src;
7323 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7324 (void **)&pu8Src,
7325 enmOpSize == IEMMODE_64BIT
7326 ? 2 + 8
7327 : enmOpSize == IEMMODE_32BIT
7328 ? 2 + 4
7329 : 2 + 3,
7330 iSegReg,
7331 GCPtrMem,
7332 IEM_ACCESS_DATA_R);
7333 if (rcStrict == VINF_SUCCESS)
7334 {
7335 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7336 switch (enmOpSize)
7337 {
7338 case IEMMODE_16BIT:
7339 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7340 break;
7341 case IEMMODE_32BIT:
7342 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7343 break;
7344 case IEMMODE_64BIT:
7345 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7346 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7347 break;
7348
7349 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7350 }
7351 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7352 }
7353 return rcStrict;
7354}
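
/*
 * Illustrative only, not part of the build: an lgdt/lidt style caller would
 * fetch the limit and base with iemMemFetchDataXdtr and only then load the
 * descriptor-table register.  iEffSeg, GCPtrEffSrc and enmEffOpSize are
 * stand-ins for what the instruction decoder provides.
 *
 *     uint16_t cbLimit;
 *     RTGCPTR  GCPtrBase;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase,
 *                                                 iEffSeg, GCPtrEffSrc, enmEffOpSize);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         // ... commit cbLimit/GCPtrBase to the guest GDTR or IDTR ...
 *     }
 */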
7355
7356
7357
7358/**
7359 * Stores a data byte.
7360 *
7361 * @returns Strict VBox status code.
7362 * @param pIemCpu The IEM per CPU data.
7363 * @param iSegReg The index of the segment register to use for
7364 * this access. The base and limits are checked.
7365 * @param GCPtrMem The address of the guest memory.
7366 * @param u8Value The value to store.
7367 */
7368IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7369{
7370 /* The lazy approach for now... */
7371 uint8_t *pu8Dst;
7372 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7373 if (rc == VINF_SUCCESS)
7374 {
7375 *pu8Dst = u8Value;
7376 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7377 }
7378 return rc;
7379}
7380
7381
7382/**
7383 * Stores a data word.
7384 *
7385 * @returns Strict VBox status code.
7386 * @param pIemCpu The IEM per CPU data.
7387 * @param iSegReg The index of the segment register to use for
7388 * this access. The base and limits are checked.
7389 * @param GCPtrMem The address of the guest memory.
7390 * @param u16Value The value to store.
7391 */
7392IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7393{
7394 /* The lazy approach for now... */
7395 uint16_t *pu16Dst;
7396 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7397 if (rc == VINF_SUCCESS)
7398 {
7399 *pu16Dst = u16Value;
7400 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7401 }
7402 return rc;
7403}
7404
7405
7406/**
7407 * Stores a data dword.
7408 *
7409 * @returns Strict VBox status code.
7410 * @param pIemCpu The IEM per CPU data.
7411 * @param iSegReg The index of the segment register to use for
7412 * this access. The base and limits are checked.
7413 * @param GCPtrMem The address of the guest memory.
7414 * @param u32Value The value to store.
7415 */
7416IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7417{
7418 /* The lazy approach for now... */
7419 uint32_t *pu32Dst;
7420 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7421 if (rc == VINF_SUCCESS)
7422 {
7423 *pu32Dst = u32Value;
7424 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7425 }
7426 return rc;
7427}
7428
7429
7430/**
7431 * Stores a data qword.
7432 *
7433 * @returns Strict VBox status code.
7434 * @param pIemCpu The IEM per CPU data.
7435 * @param iSegReg The index of the segment register to use for
7436 * this access. The base and limits are checked.
7437 * @param GCPtrMem The address of the guest memory.
7438 * @param u64Value The value to store.
7439 */
7440IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7441{
7442 /* The lazy approach for now... */
7443 uint64_t *pu64Dst;
7444 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7445 if (rc == VINF_SUCCESS)
7446 {
7447 *pu64Dst = u64Value;
7448 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7449 }
7450 return rc;
7451}
7452
7453
7454/**
7455 * Stores a data dqword.
7456 *
7457 * @returns Strict VBox status code.
7458 * @param pIemCpu The IEM per CPU data.
7459 * @param iSegReg The index of the segment register to use for
7460 * this access. The base and limits are checked.
7461 * @param GCPtrMem The address of the guest memory.
7462 * @param u128Value The value to store.
7463 */
7464IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7465{
7466 /* The lazy approach for now... */
7467 uint128_t *pu128Dst;
7468 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7469 if (rc == VINF_SUCCESS)
7470 {
7471 *pu128Dst = u128Value;
7472 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7473 }
7474 return rc;
7475}
7476
7477
7478/**
7479 * Stores a data dqword, SSE aligned.
7480 *
7481 * @returns Strict VBox status code.
7482 * @param pIemCpu The IEM per CPU data.
7483 * @param iSegReg The index of the segment register to use for
7484 * this access. The base and limits are checked.
7485 * @param GCPtrMem The address of the guest memory.
7486 * @param u128Value The value to store.
7487 */
7488IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7489{
7490 /* The lazy approach for now... */
7491 if ( (GCPtrMem & 15)
7492 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7493 return iemRaiseGeneralProtectionFault0(pIemCpu);
7494
7495 uint128_t *pu128Dst;
7496 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7497 if (rc == VINF_SUCCESS)
7498 {
7499 *pu128Dst = u128Value;
7500 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7501 }
7502 return rc;
7503}
7504
7505
7506/**
7507 * Stores a descriptor register (sgdt, sidt).
7508 *
7509 * @returns Strict VBox status code.
7510 * @param pIemCpu The IEM per CPU data.
7511 * @param cbLimit The limit.
7512 * @param GCPtrBase The base address.
7513 * @param iSegReg The index of the segment register to use for
7514 * this access. The base and limits are checked.
7515 * @param GCPtrMem The address of the guest memory.
7516 * @param enmOpSize The effective operand size.
7517 */
7518IEM_STATIC VBOXSTRICTRC
7519iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7520{
7521 uint8_t *pu8Src;
7522 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7523 (void **)&pu8Src,
7524 enmOpSize == IEMMODE_64BIT
7525 ? 2 + 8
7526 : enmOpSize == IEMMODE_32BIT
7527 ? 2 + 4
7528 : 2 + 3,
7529 iSegReg,
7530 GCPtrMem,
7531 IEM_ACCESS_DATA_W);
7532 if (rcStrict == VINF_SUCCESS)
7533 {
7534 pu8Src[0] = RT_BYTE1(cbLimit);
7535 pu8Src[1] = RT_BYTE2(cbLimit);
7536 pu8Src[2] = RT_BYTE1(GCPtrBase);
7537 pu8Src[3] = RT_BYTE2(GCPtrBase);
7538 pu8Src[4] = RT_BYTE3(GCPtrBase);
7539 if (enmOpSize == IEMMODE_16BIT)
7540 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7541 else
7542 {
7543 pu8Src[5] = RT_BYTE4(GCPtrBase);
7544 if (enmOpSize == IEMMODE_64BIT)
7545 {
7546 pu8Src[6] = RT_BYTE5(GCPtrBase);
7547 pu8Src[7] = RT_BYTE6(GCPtrBase);
7548 pu8Src[8] = RT_BYTE7(GCPtrBase);
7549 pu8Src[9] = RT_BYTE8(GCPtrBase);
7550 }
7551 }
7552 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7553 }
7554 return rcStrict;
7555}
7556
7557
7558/**
7559 * Pushes a word onto the stack.
7560 *
7561 * @returns Strict VBox status code.
7562 * @param pIemCpu The IEM per CPU data.
7563 * @param u16Value The value to push.
7564 */
7565IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7566{
7567 /* Decrement the stack pointer. */
7568 uint64_t uNewRsp;
7569 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7570 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7571
7572 /* Write the word the lazy way. */
7573 uint16_t *pu16Dst;
7574 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7575 if (rc == VINF_SUCCESS)
7576 {
7577 *pu16Dst = u16Value;
7578 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7579 }
7580
7581 /* Commit the new RSP value unless an access handler made trouble. */
7582 if (rc == VINF_SUCCESS)
7583 pCtx->rsp = uNewRsp;
7584
7585 return rc;
7586}
7587
7588
7589/**
7590 * Pushes a dword onto the stack.
7591 *
7592 * @returns Strict VBox status code.
7593 * @param pIemCpu The IEM per CPU data.
7594 * @param u32Value The value to push.
7595 */
7596IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7597{
7598 /* Decrement the stack pointer. */
7599 uint64_t uNewRsp;
7600 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7601 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7602
7603 /* Write the dword the lazy way. */
7604 uint32_t *pu32Dst;
7605 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7606 if (rc == VINF_SUCCESS)
7607 {
7608 *pu32Dst = u32Value;
7609 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7610 }
7611
7612 /* Commit the new RSP value unless an access handler made trouble. */
7613 if (rc == VINF_SUCCESS)
7614 pCtx->rsp = uNewRsp;
7615
7616 return rc;
7617}
7618
7619
7620/**
7621 * Pushes a dword segment register value onto the stack.
7622 *
7623 * @returns Strict VBox status code.
7624 * @param pIemCpu The IEM per CPU data.
7625 * @param u32Value The value to push.
7626 */
7627IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7628{
7629 /* Decrement the stack pointer. */
7630 uint64_t uNewRsp;
7631 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7632 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7633
7634 VBOXSTRICTRC rc;
7635 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7636 {
7637 /* The recompiler writes a full dword. */
7638 uint32_t *pu32Dst;
7639 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7640 if (rc == VINF_SUCCESS)
7641 {
7642 *pu32Dst = u32Value;
7643 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7644 }
7645 }
7646 else
7647 {
7648 /* The Intel docs talk about zero extending the selector register
7649 value. My actual Intel CPU here might be zero extending the value,
7650 but it still only writes the lower word... */
7651 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7652 * happens when crossing an electric page boundary, is the high word
7653 * checked for write accessibility or not? Probably it is. What about
7654 * segment limits? */
7655 uint16_t *pu16Dst;
7656 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7657 if (rc == VINF_SUCCESS)
7658 {
7659 *pu16Dst = (uint16_t)u32Value;
7660 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7661 }
7662 }
7663
7664 /* Commit the new RSP value unless an access handler made trouble. */
7665 if (rc == VINF_SUCCESS)
7666 pCtx->rsp = uNewRsp;
7667
7668 return rc;
7669}
7670
7671
7672/**
7673 * Pushes a qword onto the stack.
7674 *
7675 * @returns Strict VBox status code.
7676 * @param pIemCpu The IEM per CPU data.
7677 * @param u64Value The value to push.
7678 */
7679IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7680{
7681 /* Decrement the stack pointer. */
7682 uint64_t uNewRsp;
7683 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7684 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7685
7686 /* Write the qword the lazy way. */
7687 uint64_t *pu64Dst;
7688 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7689 if (rc == VINF_SUCCESS)
7690 {
7691 *pu64Dst = u64Value;
7692 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7693 }
7694
7695 /* Commit the new RSP value unless an access handler made trouble. */
7696 if (rc == VINF_SUCCESS)
7697 pCtx->rsp = uNewRsp;
7698
7699 return rc;
7700}
7701
7702
7703/**
7704 * Pops a word from the stack.
7705 *
7706 * @returns Strict VBox status code.
7707 * @param pIemCpu The IEM per CPU data.
7708 * @param pu16Value Where to store the popped value.
7709 */
7710IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7711{
7712 /* Increment the stack pointer. */
7713 uint64_t uNewRsp;
7714 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7715 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7716
7717 /* Read the word the lazy way. */
7718 uint16_t const *pu16Src;
7719 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7720 if (rc == VINF_SUCCESS)
7721 {
7722 *pu16Value = *pu16Src;
7723 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7724
7725 /* Commit the new RSP value. */
7726 if (rc == VINF_SUCCESS)
7727 pCtx->rsp = uNewRsp;
7728 }
7729
7730 return rc;
7731}
7732
7733
7734/**
7735 * Pops a dword from the stack.
7736 *
7737 * @returns Strict VBox status code.
7738 * @param pIemCpu The IEM per CPU data.
7739 * @param pu32Value Where to store the popped value.
7740 */
7741IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7742{
7743 /* Increment the stack pointer. */
7744 uint64_t uNewRsp;
7745 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7746 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7747
7748 /* Read the dword the lazy way. */
7749 uint32_t const *pu32Src;
7750 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7751 if (rc == VINF_SUCCESS)
7752 {
7753 *pu32Value = *pu32Src;
7754 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7755
7756 /* Commit the new RSP value. */
7757 if (rc == VINF_SUCCESS)
7758 pCtx->rsp = uNewRsp;
7759 }
7760
7761 return rc;
7762}
7763
7764
7765/**
7766 * Pops a qword from the stack.
7767 *
7768 * @returns Strict VBox status code.
7769 * @param pIemCpu The IEM per CPU data.
7770 * @param pu64Value Where to store the popped value.
7771 */
7772IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7773{
7774 /* Increment the stack pointer. */
7775 uint64_t uNewRsp;
7776 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7777 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7778
7779 /* Read the qword the lazy way. */
7780 uint64_t const *pu64Src;
7781 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7782 if (rc == VINF_SUCCESS)
7783 {
7784 *pu64Value = *pu64Src;
7785 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7786
7787 /* Commit the new RSP value. */
7788 if (rc == VINF_SUCCESS)
7789 pCtx->rsp = uNewRsp;
7790 }
7791
7792 return rc;
7793}
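
/*
 * Illustrative only, not part of the build: a near-call style sequence using
 * the simple push helper; the pop helpers are used the same way on the return
 * path.  uOldPC and uNewPC are made-up locals.
 *
 *     VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)uOldPC);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // RSP is only committed by the helper on success
 *     ...
 *     uint16_t uNewPC;
 *     rcStrict = iemMemStackPopU16(pIemCpu, &uNewPC);
 */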
7794
7795
7796/**
7797 * Pushes a word onto the stack, using a temporary stack pointer.
7798 *
7799 * @returns Strict VBox status code.
7800 * @param pIemCpu The IEM per CPU data.
7801 * @param u16Value The value to push.
7802 * @param pTmpRsp Pointer to the temporary stack pointer.
7803 */
7804IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7805{
7806 /* Decrement the stack pointer. */
7807 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7808 RTUINT64U NewRsp = *pTmpRsp;
7809 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7810
7811 /* Write the word the lazy way. */
7812 uint16_t *pu16Dst;
7813 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7814 if (rc == VINF_SUCCESS)
7815 {
7816 *pu16Dst = u16Value;
7817 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7818 }
7819
7820 /* Commit the new RSP value unless an access handler made trouble. */
7821 if (rc == VINF_SUCCESS)
7822 *pTmpRsp = NewRsp;
7823
7824 return rc;
7825}
7826
7827
7828/**
7829 * Pushes a dword onto the stack, using a temporary stack pointer.
7830 *
7831 * @returns Strict VBox status code.
7832 * @param pIemCpu The IEM per CPU data.
7833 * @param u32Value The value to push.
7834 * @param pTmpRsp Pointer to the temporary stack pointer.
7835 */
7836IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7837{
7838 /* Decrement the stack pointer. */
7839 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7840 RTUINT64U NewRsp = *pTmpRsp;
7841 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7842
7843 /* Write the dword the lazy way. */
7844 uint32_t *pu32Dst;
7845 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7846 if (rc == VINF_SUCCESS)
7847 {
7848 *pu32Dst = u32Value;
7849 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7850 }
7851
7852 /* Commit the new RSP value unless an access handler made trouble. */
7853 if (rc == VINF_SUCCESS)
7854 *pTmpRsp = NewRsp;
7855
7856 return rc;
7857}
7858
7859
7860/**
7861 * Pushes a qword onto the stack, using a temporary stack pointer.
7862 *
7863 * @returns Strict VBox status code.
7864 * @param pIemCpu The IEM per CPU data.
7865 * @param u64Value The value to push.
7866 * @param pTmpRsp Pointer to the temporary stack pointer.
7867 */
7868IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7869{
7870 /* Decrement the stack pointer. */
7871 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7872 RTUINT64U NewRsp = *pTmpRsp;
7873 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7874
7875 /* Write the qword the lazy way. */
7876 uint64_t *pu64Dst;
7877 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7878 if (rc == VINF_SUCCESS)
7879 {
7880 *pu64Dst = u64Value;
7881 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7882 }
7883
7884 /* Commit the new RSP value unless an access handler made trouble. */
7885 if (rc == VINF_SUCCESS)
7886 *pTmpRsp = NewRsp;
7887
7888 return rc;
7889}
7890
7891
7892/**
7893 * Pops a word from the stack, using a temporary stack pointer.
7894 *
7895 * @returns Strict VBox status code.
7896 * @param pIemCpu The IEM per CPU data.
7897 * @param pu16Value Where to store the popped value.
7898 * @param pTmpRsp Pointer to the temporary stack pointer.
7899 */
7900IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7901{
7902 /* Increment the stack pointer. */
7903 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7904 RTUINT64U NewRsp = *pTmpRsp;
7905 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7906
7907 /* Read the word the lazy way. */
7908 uint16_t const *pu16Src;
7909 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7910 if (rc == VINF_SUCCESS)
7911 {
7912 *pu16Value = *pu16Src;
7913 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7914
7915 /* Commit the new RSP value. */
7916 if (rc == VINF_SUCCESS)
7917 *pTmpRsp = NewRsp;
7918 }
7919
7920 return rc;
7921}
7922
7923
7924/**
7925 * Pops a dword from the stack, using a temporary stack pointer.
7926 *
7927 * @returns Strict VBox status code.
7928 * @param pIemCpu The IEM per CPU data.
7929 * @param pu32Value Where to store the popped value.
7930 * @param pTmpRsp Pointer to the temporary stack pointer.
7931 */
7932IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7933{
7934 /* Increment the stack pointer. */
7935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7936 RTUINT64U NewRsp = *pTmpRsp;
7937 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7938
7939 /* Read the dword the lazy way. */
7940 uint32_t const *pu32Src;
7941 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7942 if (rc == VINF_SUCCESS)
7943 {
7944 *pu32Value = *pu32Src;
7945 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7946
7947 /* Commit the new RSP value. */
7948 if (rc == VINF_SUCCESS)
7949 *pTmpRsp = NewRsp;
7950 }
7951
7952 return rc;
7953}
7954
7955
7956/**
7957 * Pops a qword from the stack, using a temporary stack pointer.
7958 *
7959 * @returns Strict VBox status code.
7960 * @param pIemCpu The IEM per CPU data.
7961 * @param pu64Value Where to store the popped value.
7962 * @param pTmpRsp Pointer to the temporary stack pointer.
7963 */
7964IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7965{
7966 /* Increment the stack pointer. */
7967 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7968 RTUINT64U NewRsp = *pTmpRsp;
7969 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7970
7971 /* Read the qword the lazy way. */
7972 uint64_t const *pu64Src;
7973 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7974 if (rcStrict == VINF_SUCCESS)
7975 {
7976 *pu64Value = *pu64Src;
7977 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7978
7979 /* Commit the new RSP value. */
7980 if (rcStrict == VINF_SUCCESS)
7981 *pTmpRsp = NewRsp;
7982 }
7983
7984 return rcStrict;
7985}
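
/*
 * Illustrative only, not part of the build: the *Ex variants operate on a
 * caller-supplied RSP copy so that a multi-access operation (iret, popa and
 * friends) only commits the stack pointer once every access has succeeded.
 * u16First and u16Second are made-up locals.
 *
 *     uint16_t   u16First, u16Second;
 *     RTUINT64U  TmpRsp;
 *     TmpRsp.u = pCtx->rsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16First, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Second, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         pCtx->rsp = TmpRsp.u; // commit only after all accesses worked
 */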
7986
7987
7988/**
7989 * Begin a special stack push (used by interrupts, exceptions and such).
7990 *
7991 * This will raise \#SS or \#PF if appropriate.
7992 *
7993 * @returns Strict VBox status code.
7994 * @param pIemCpu The IEM per CPU data.
7995 * @param cbMem The number of bytes to push onto the stack.
7996 * @param ppvMem Where to return the pointer to the stack memory.
7997 * As with the other memory functions this could be
7998 * direct access or bounce buffered access, so
7999 * don't commit any registers until the commit call
8000 * succeeds.
8001 * @param puNewRsp Where to return the new RSP value. This must be
8002 * passed unchanged to
8003 * iemMemStackPushCommitSpecial().
8004 */
8005IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8006{
8007 Assert(cbMem < UINT8_MAX);
8008 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8009 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8010 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8011}
8012
8013
8014/**
8015 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8016 *
8017 * This will update the rSP.
8018 *
8019 * @returns Strict VBox status code.
8020 * @param pIemCpu The IEM per CPU data.
8021 * @param pvMem The pointer returned by
8022 * iemMemStackPushBeginSpecial().
8023 * @param uNewRsp The new RSP value returned by
8024 * iemMemStackPushBeginSpecial().
8025 */
8026IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8027{
8028 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8029 if (rcStrict == VINF_SUCCESS)
8030 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8031 return rcStrict;
8032}
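
/*
 * Illustrative only, not part of the build: the special push pair is intended
 * for exception/interrupt dispatch, where a whole stack frame is mapped,
 * filled in, and only then committed together with RSP.  The 6-byte frame
 * size is just an example.
 *
 *     void    *pvFrame;
 *     uint64_t uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, &pvFrame, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         // ... write the return frame into pvFrame ...
 *         rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvFrame, uNewRsp);
 *     }
 */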
8033
8034
8035/**
8036 * Begin a special stack pop (used by iret, retf and such).
8037 *
8038 * This will raise \#SS or \#PF if appropriate.
8039 *
8040 * @returns Strict VBox status code.
8041 * @param pIemCpu The IEM per CPU data.
8042 * @param cbMem The number of bytes to pop off the stack.
8043 * @param ppvMem Where to return the pointer to the stack memory.
8044 * @param puNewRsp Where to return the new RSP value. This must be
8045 * passed unchanged to
8046 * iemMemStackPopCommitSpecial() or applied
8047 * manually if iemMemStackPopDoneSpecial() is used.
8048 */
8049IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8050{
8051 Assert(cbMem < UINT8_MAX);
8052 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8053 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8054 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8055}
8056
8057
8058/**
8059 * Continue a special stack pop (used by iret and retf).
8060 *
8061 * This will raise \#SS or \#PF if appropriate.
8062 *
8063 * @returns Strict VBox status code.
8064 * @param pIemCpu The IEM per CPU data.
8065 * @param cbMem The number of bytes to pop off the stack.
8066 * @param ppvMem Where to return the pointer to the stack memory.
8067 * @param puNewRsp Where to return the new RSP value. This must be
8068 * passed unchanged to
8069 * iemMemStackPopCommitSpecial() or applied
8070 * manually if iemMemStackPopDoneSpecial() is used.
8071 */
8072IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8073{
8074 Assert(cbMem < UINT8_MAX);
8075 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8076 RTUINT64U NewRsp;
8077 NewRsp.u = *puNewRsp;
8078 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8079 *puNewRsp = NewRsp.u;
8080 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8081}
8082
8083
8084/**
8085 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8086 *
8087 * This will update the rSP.
8088 *
8089 * @returns Strict VBox status code.
8090 * @param pIemCpu The IEM per CPU data.
8091 * @param pvMem The pointer returned by
8092 * iemMemStackPopBeginSpecial().
8093 * @param uNewRsp The new RSP value returned by
8094 * iemMemStackPopBeginSpecial().
8095 */
8096IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8097{
8098 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8099 if (rcStrict == VINF_SUCCESS)
8100 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8101 return rcStrict;
8102}
8103
8104
8105/**
8106 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8107 * iemMemStackPopContinueSpecial).
8108 *
8109 * The caller will manually commit the rSP.
8110 *
8111 * @returns Strict VBox status code.
8112 * @param pIemCpu The IEM per CPU data.
8113 * @param pvMem The pointer returned by
8114 * iemMemStackPopBeginSpecial() or
8115 * iemMemStackPopContinueSpecial().
8116 */
8117IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8118{
8119 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8120}
8121
8122
8123/**
8124 * Fetches a system table byte.
8125 *
8126 * @returns Strict VBox status code.
8127 * @param pIemCpu The IEM per CPU data.
8128 * @param pbDst Where to return the byte.
8129 * @param iSegReg The index of the segment register to use for
8130 * this access. The base and limits are checked.
8131 * @param GCPtrMem The address of the guest memory.
8132 */
8133IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8134{
8135 /* The lazy approach for now... */
8136 uint8_t const *pbSrc;
8137 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8138 if (rc == VINF_SUCCESS)
8139 {
8140 *pbDst = *pbSrc;
8141 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8142 }
8143 return rc;
8144}
8145
8146
8147/**
8148 * Fetches a system table word.
8149 *
8150 * @returns Strict VBox status code.
8151 * @param pIemCpu The IEM per CPU data.
8152 * @param pu16Dst Where to return the word.
8153 * @param iSegReg The index of the segment register to use for
8154 * this access. The base and limits are checked.
8155 * @param GCPtrMem The address of the guest memory.
8156 */
8157IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8158{
8159 /* The lazy approach for now... */
8160 uint16_t const *pu16Src;
8161 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8162 if (rc == VINF_SUCCESS)
8163 {
8164 *pu16Dst = *pu16Src;
8165 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8166 }
8167 return rc;
8168}
8169
8170
8171/**
8172 * Fetches a system table dword.
8173 *
8174 * @returns Strict VBox status code.
8175 * @param pIemCpu The IEM per CPU data.
8176 * @param pu32Dst Where to return the dword.
8177 * @param iSegReg The index of the segment register to use for
8178 * this access. The base and limits are checked.
8179 * @param GCPtrMem The address of the guest memory.
8180 */
8181IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8182{
8183 /* The lazy approach for now... */
8184 uint32_t const *pu32Src;
8185 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8186 if (rc == VINF_SUCCESS)
8187 {
8188 *pu32Dst = *pu32Src;
8189 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8190 }
8191 return rc;
8192}
8193
8194
8195/**
8196 * Fetches a system table qword.
8197 *
8198 * @returns Strict VBox status code.
8199 * @param pIemCpu The IEM per CPU data.
8200 * @param pu64Dst Where to return the qword.
8201 * @param iSegReg The index of the segment register to use for
8202 * this access. The base and limits are checked.
8203 * @param GCPtrMem The address of the guest memory.
8204 */
8205IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8206{
8207 /* The lazy approach for now... */
8208 uint64_t const *pu64Src;
8209 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8210 if (rc == VINF_SUCCESS)
8211 {
8212 *pu64Dst = *pu64Src;
8213 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8214 }
8215 return rc;
8216}
8217
8218
8219/**
8220 * Fetches a descriptor table entry with caller specified error code.
8221 *
8222 * @returns Strict VBox status code.
8223 * @param pIemCpu The IEM per CPU.
8224 * @param pDesc Where to return the descriptor table entry.
8225 * @param uSel The selector which table entry to fetch.
8226 * @param uXcpt The exception to raise on table lookup error.
8227 * @param uErrorCode The error code associated with the exception.
8228 */
8229IEM_STATIC VBOXSTRICTRC
8230iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8231{
8232 AssertPtr(pDesc);
8233 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8234
8235 /** @todo did the 286 require all 8 bytes to be accessible? */
8236 /*
8237 * Get the selector table base and check bounds.
8238 */
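    /* Note: uSel | X86_SEL_RPL_LDT (i.e. the TI and RPL bits forced set) is the offset
       of the last byte of the 8-byte descriptor, so comparing it against the inclusive
       table limit below checks that the entire entry lies within the table. */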
8239 RTGCPTR GCPtrBase;
8240 if (uSel & X86_SEL_LDT)
8241 {
8242 if ( !pCtx->ldtr.Attr.n.u1Present
8243 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8244 {
8245 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8246 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8247 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8248 uErrorCode, 0);
8249 }
8250
8251 Assert(pCtx->ldtr.Attr.n.u1Present);
8252 GCPtrBase = pCtx->ldtr.u64Base;
8253 }
8254 else
8255 {
8256 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8257 {
8258 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8259 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8260 uErrorCode, 0);
8261 }
8262 GCPtrBase = pCtx->gdtr.pGdt;
8263 }
8264
8265 /*
8266 * Read the legacy descriptor and maybe the long mode extensions if
8267 * required.
8268 */
8269 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8270 if (rcStrict == VINF_SUCCESS)
8271 {
8272 if ( !IEM_IS_LONG_MODE(pIemCpu)
8273 || pDesc->Legacy.Gen.u1DescType)
8274 pDesc->Long.au64[1] = 0;
8275 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8276 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8277 else
8278 {
8279 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8280 /** @todo is this the right exception? */
8281 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8282 }
8283 }
8284 return rcStrict;
8285}
8286
8287
8288/**
8289 * Fetches a descriptor table entry.
8290 *
8291 * @returns Strict VBox status code.
8292 * @param pIemCpu The IEM per CPU.
8293 * @param pDesc Where to return the descriptor table entry.
8294 * @param uSel The selector which table entry to fetch.
8295 * @param uXcpt The exception to raise on table lookup error.
8296 */
8297IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8298{
8299 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8300}
8301
8302
8303/**
8304 * Fakes a long mode stack selector for SS = 0.
8305 *
8306 * @param pDescSs Where to return the fake stack descriptor.
8307 * @param uDpl The DPL we want.
8308 */
8309IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8310{
8311 pDescSs->Long.au64[0] = 0;
8312 pDescSs->Long.au64[1] = 0;
8313 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8314 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8315 pDescSs->Long.Gen.u2Dpl = uDpl;
8316 pDescSs->Long.Gen.u1Present = 1;
8317 pDescSs->Long.Gen.u1Long = 1;
8318}
8319
8320
8321/**
8322 * Marks the selector descriptor as accessed (only non-system descriptors).
8323 *
8324 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8325 * will therefore skip the limit checks.
8326 *
8327 * @returns Strict VBox status code.
8328 * @param pIemCpu The IEM per CPU.
8329 * @param uSel The selector.
8330 */
8331IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8332{
8333 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8334
8335 /*
8336 * Get the selector table base and calculate the entry address.
8337 */
8338 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8339 ? pCtx->ldtr.u64Base
8340 : pCtx->gdtr.pGdt;
8341 GCPtr += uSel & X86_SEL_MASK;
8342
8343 /*
8344 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8345 * ugly stuff to avoid this. This will make sure it's an atomic access
8346 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8347 */
8348 VBOXSTRICTRC rcStrict;
8349 uint32_t volatile *pu32;
8350 if ((GCPtr & 3) == 0)
8351 {
8352 /* The normal case, map the 32 bits around the accessed bit (descriptor bit 40). */
8353 GCPtr += 2 + 2;
8354 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8355 if (rcStrict != VINF_SUCCESS)
8356 return rcStrict;
8357 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8358 }
8359 else
8360 {
8361 /* The misaligned GDT/LDT case, map the whole thing. */
8362 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8363 if (rcStrict != VINF_SUCCESS)
8364 return rcStrict;
8365 switch ((uintptr_t)pu32 & 3)
8366 {
8367 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8368 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8369 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8370 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8371 }
8372 }
8373
8374 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8375}
8376
8377/** @} */
8378
8379
8380/*
8381 * Include the C/C++ implementation of instruction.
8382 */
8383#include "IEMAllCImpl.cpp.h"
8384
8385
8386
8387/** @name "Microcode" macros.
8388 *
8389 * The idea is that we should be able to use the same code to interpret
8390 * instructions as well as to recompile them. Thus this obfuscation.
8391 *
8392 * @{
8393 */
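/*
 * A rough sketch of how the macros below compose inside an opcode handler
 * (illustrative only; the real handlers live in IEMAllInstructions.cpp.h and
 * the handler name and register choices here are made up):
 *
 *     FNIEMOP_DEF(iemOp_example_mov_edx_eax)
 *     {
 *         IEM_MC_BEGIN(0, 1);
 *         IEM_MC_LOCAL(uint32_t, u32Value);
 *         IEM_MC_FETCH_GREG_U32(u32Value, X86_GREG_xAX);
 *         IEM_MC_STORE_GREG_U32(X86_GREG_xDX, u32Value);
 *         IEM_MC_ADVANCE_RIP();
 *         IEM_MC_END();
 *         return VINF_SUCCESS;
 *     }
 *
 * When interpreting, each statement expands to plain C operating on pIemCpu;
 * a recompiler could expand the very same body into code generation instead.
 */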
8394#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8395#define IEM_MC_END() }
8396#define IEM_MC_PAUSE() do {} while (0)
8397#define IEM_MC_CONTINUE() do {} while (0)
8398
8399/** Internal macro. */
8400#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8401 do \
8402 { \
8403 VBOXSTRICTRC rcStrict2 = a_Expr; \
8404 if (rcStrict2 != VINF_SUCCESS) \
8405 return rcStrict2; \
8406 } while (0)
8407
8408#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8409#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8410#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8411#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8412#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8413#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8414#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8415
8416#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8417#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8418 do { \
8419 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8420 return iemRaiseDeviceNotAvailable(pIemCpu); \
8421 } while (0)
8422#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8423 do { \
8424 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8425 return iemRaiseMathFault(pIemCpu); \
8426 } while (0)
8427#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8428 do { \
8429 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8430 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8431 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8432 return iemRaiseUndefinedOpcode(pIemCpu); \
8433 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8434 return iemRaiseDeviceNotAvailable(pIemCpu); \
8435 } while (0)
8436#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8437 do { \
8438 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8439 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8440 return iemRaiseUndefinedOpcode(pIemCpu); \
8441 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8442 return iemRaiseDeviceNotAvailable(pIemCpu); \
8443 } while (0)
8444#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8445 do { \
8446 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8447 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8448 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8449 return iemRaiseUndefinedOpcode(pIemCpu); \
8450 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8451 return iemRaiseDeviceNotAvailable(pIemCpu); \
8452 } while (0)
8453#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8454 do { \
8455 if (pIemCpu->uCpl != 0) \
8456 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8457 } while (0)
8458
8459
8460#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8461#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8462#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8463#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8464#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8465#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8466#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8467 uint32_t a_Name; \
8468 uint32_t *a_pName = &a_Name
8469#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8470 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8471
8472#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8473#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8474
8475#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8476#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8477#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8478#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8479#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8480#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8481#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8482#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8483#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8484#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8485#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8486#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8487#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8488#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8489#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8490#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8491#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8492#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8493#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8494#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8495#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8496#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8497#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8498#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8499#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8500#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8501#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8502#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8503#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8504/** @note Not for IOPL or IF testing or modification. */
8505#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8506#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8507#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8508#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8509
8510#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8511#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8512#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8513#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8514#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8515#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8516#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8517#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8518#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8519#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8520#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8521 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8522
8523#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8524#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8525/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8526 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8527#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8528#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8529/** @note Not for IOPL or IF testing or modification. */
8530#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8531
8532#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8533#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8534#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8535 do { \
8536 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8537 *pu32Reg += (a_u32Value); \
8538 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8539 } while (0)
8540#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8541
8542#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8543#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8544#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8545 do { \
8546 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8547 *pu32Reg -= (a_u32Value); \
8548 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8549 } while (0)
8550#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8551
8552#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8553#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8554#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8555#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8556#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8557#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8558#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8559
8560#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8561#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8562#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8563#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8564
8565#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8566#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8567#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8568
8569#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8570#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8571
8572#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8573#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8574#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8575
8576#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8577#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8578#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8579
8580#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8581
8582#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8583
8584#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8585#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8586#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8587 do { \
8588 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8589 *pu32Reg &= (a_u32Value); \
8590 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8591 } while (0)
8592#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8593
8594#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8595#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8596#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8597 do { \
8598 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8599 *pu32Reg |= (a_u32Value); \
8600 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8601 } while (0)
8602#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8603
8604
8605/** @note Not for IOPL or IF modification. */
8606#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8607/** @note Not for IOPL or IF modification. */
8608#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8609/** @note Not for IOPL or IF modification. */
8610#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8611
8612#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8613
8614
8615#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8616 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8617#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8618 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8619#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8620 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8621#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8622 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8623#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8624 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8625#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8626 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8627#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8628 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8629
8630#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8631 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8632#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8633 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8634#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8635 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8636#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8637 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8638#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8639 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8640 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8641 } while (0)
8642#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8643 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8644 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8645 } while (0)
8646#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8647 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8648#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8649 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8650#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8651 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8652
8653#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8655#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8656 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8657#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8658 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8659
8660#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8662#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8664#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8666
8667#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8669#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8670 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8671#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8673
8674#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8676
8677#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8678 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8679#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8680 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8681#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8683#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8685
8686#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8688#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8690#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8691 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8692
8693#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8695#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8696 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8697
8698
8699
8700#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8701 do { \
8702 uint8_t u8Tmp; \
8703 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8704 (a_u16Dst) = u8Tmp; \
8705 } while (0)
8706#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8707 do { \
8708 uint8_t u8Tmp; \
8709 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8710 (a_u32Dst) = u8Tmp; \
8711 } while (0)
8712#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8713 do { \
8714 uint8_t u8Tmp; \
8715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8716 (a_u64Dst) = u8Tmp; \
8717 } while (0)
8718#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8719 do { \
8720 uint16_t u16Tmp; \
8721 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8722 (a_u32Dst) = u16Tmp; \
8723 } while (0)
8724#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8725 do { \
8726 uint16_t u16Tmp; \
8727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8728 (a_u64Dst) = u16Tmp; \
8729 } while (0)
8730#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8731 do { \
8732 uint32_t u32Tmp; \
8733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8734 (a_u64Dst) = u32Tmp; \
8735 } while (0)
8736
8737#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8738 do { \
8739 uint8_t u8Tmp; \
8740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8741 (a_u16Dst) = (int8_t)u8Tmp; \
8742 } while (0)
8743#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8744 do { \
8745 uint8_t u8Tmp; \
8746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8747 (a_u32Dst) = (int8_t)u8Tmp; \
8748 } while (0)
8749#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8750 do { \
8751 uint8_t u8Tmp; \
8752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8753 (a_u64Dst) = (int8_t)u8Tmp; \
8754 } while (0)
8755#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8756 do { \
8757 uint16_t u16Tmp; \
8758 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8759 (a_u32Dst) = (int16_t)u16Tmp; \
8760 } while (0)
8761#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8762 do { \
8763 uint16_t u16Tmp; \
8764 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8765 (a_u64Dst) = (int16_t)u16Tmp; \
8766 } while (0)
8767#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8768 do { \
8769 uint32_t u32Tmp; \
8770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8771 (a_u64Dst) = (int32_t)u32Tmp; \
8772 } while (0)
8773
8774#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8775 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8776#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8777 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8778#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8779 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8780#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8781 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8782
8783#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8784 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8785#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8786 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8787#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8788 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8789#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8790 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8791
8792#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8793#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8794#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8795#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8796#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8797#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8798#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8799 do { \
8800 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8801 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8802 } while (0)
8803
8804#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8805 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8806#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8807 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8808
8809
8810#define IEM_MC_PUSH_U16(a_u16Value) \
8811 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8812#define IEM_MC_PUSH_U32(a_u32Value) \
8813 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8814#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8815 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8816#define IEM_MC_PUSH_U64(a_u64Value) \
8817 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8818
8819#define IEM_MC_POP_U16(a_pu16Value) \
8820 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8821#define IEM_MC_POP_U32(a_pu32Value) \
8822 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8823#define IEM_MC_POP_U64(a_pu64Value) \
8824 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
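/*
 * E.g. a 16-bit "push bp" style body reduces to (sketch only):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xBP);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */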
8825
8826/** Maps guest memory for direct or bounce buffered access.
8827 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8828 * @remarks May return.
8829 */
8830#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8831 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8832
8833/** Maps guest memory for direct or bounce buffered access.
8834 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8835 * @remarks May return.
8836 */
8837#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8838 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8839
8840/** Commits the memory and unmaps the guest memory.
8841 * @remarks May return.
8842 */
8843#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8844 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
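/*
 * Sketch of the usual map/commit pairing for a read-modify-write memory
 * operand (illustrative only; bRm and iGRegSrc are assumed to come from the
 * ModR/M decoding that a real handler does first):
 *
 *     IEM_MC_BEGIN(3, 2);
 *     IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *     IEM_MC_ARG(uint32_t, u32Src, 1);
 *     IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
 *     IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_FETCH_GREG_U32(u32Src, iGRegSrc);
 *     IEM_MC_FETCH_EFLAGS(EFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *     IEM_MC_COMMIT_EFLAGS(EFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */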
8845
8846/** Commits the memory and unmaps the guest memory unless the FPU status word
8847 * (@a a_u16FSW) and the FPU control word indicate a pending exception
8848 * that would cause FLD not to store.
8849 *
8850 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8851 * store, while \#P will not.
8852 *
8853 * @remarks May in theory return - for now.
8854 */
8855#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8856 do { \
8857 if ( !(a_u16FSW & X86_FSW_ES) \
8858 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8859 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8860 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8861 } while (0)
8862
8863/** Calculates the effective address from the ModR/M byte. */
8864#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8865 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8866
8867#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8868#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8869#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8870#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8871#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8872#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8873#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8874
8875/**
8876 * Defers the rest of the instruction emulation to a C implementation routine
8877 * and returns, only taking the standard parameters.
8878 *
8879 * @param a_pfnCImpl The pointer to the C routine.
8880 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8881 */
8882#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8883
8884/**
8885 * Defers the rest of instruction emulation to a C implementation routine and
8886 * returns, taking one argument in addition to the standard ones.
8887 *
8888 * @param a_pfnCImpl The pointer to the C routine.
8889 * @param a0 The argument.
8890 */
8891#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8892
8893/**
8894 * Defers the rest of the instruction emulation to a C implementation routine
8895 * and returns, taking two arguments in addition to the standard ones.
8896 *
8897 * @param a_pfnCImpl The pointer to the C routine.
8898 * @param a0 The first extra argument.
8899 * @param a1 The second extra argument.
8900 */
8901#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8902
8903/**
8904 * Defers the rest of the instruction emulation to a C implementation routine
8905 * and returns, taking three arguments in addition to the standard ones.
8906 *
8907 * @param a_pfnCImpl The pointer to the C routine.
8908 * @param a0 The first extra argument.
8909 * @param a1 The second extra argument.
8910 * @param a2 The third extra argument.
8911 */
8912#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8913
8914/**
8915 * Defers the rest of the instruction emulation to a C implementation routine
8916 * and returns, taking four arguments in addition to the standard ones.
8917 *
8918 * @param a_pfnCImpl The pointer to the C routine.
8919 * @param a0 The first extra argument.
8920 * @param a1 The second extra argument.
8921 * @param a2 The third extra argument.
8922 * @param a3 The fourth extra argument.
8923 */
8924#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8925
8926/**
8927 * Defers the rest of the instruction emulation to a C implementation routine
8928 * and returns, taking five arguments in addition to the standard ones.
8929 *
8930 * @param a_pfnCImpl The pointer to the C routine.
8931 * @param a0 The first extra argument.
8932 * @param a1 The second extra argument.
8933 * @param a2 The third extra argument.
8934 * @param a3 The fourth extra argument.
8935 * @param a4 The fifth extra argument.
8936 */
8937#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8938
8939/**
8940 * Defers the entire instruction emulation to a C implementation routine and
8941 * returns, only taking the standard parameters.
8942 *
8943 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8944 *
8945 * @param a_pfnCImpl The pointer to the C routine.
8946 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8947 */
8948#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8949
8950/**
8951 * Defers the entire instruction emulation to a C implementation routine and
8952 * returns, taking one argument in addition to the standard ones.
8953 *
8954 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8955 *
8956 * @param a_pfnCImpl The pointer to the C routine.
8957 * @param a0 The argument.
8958 */
8959#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8960
8961/**
8962 * Defers the entire instruction emulation to a C implementation routine and
8963 * returns, taking two arguments in addition to the standard ones.
8964 *
8965 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8966 *
8967 * @param a_pfnCImpl The pointer to the C routine.
8968 * @param a0 The first extra argument.
8969 * @param a1 The second extra argument.
8970 */
8971#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8972
8973/**
8974 * Defers the entire instruction emulation to a C implementation routine and
8975 * returns, taking three arguments in addition to the standard ones.
8976 *
8977 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8978 *
8979 * @param a_pfnCImpl The pointer to the C routine.
8980 * @param a0 The first extra argument.
8981 * @param a1 The second extra argument.
8982 * @param a2 The third extra argument.
8983 */
8984#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
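/*
 * Sketch of the deferral flavours (the handler name is made up; iemCImpl_hlt
 * is one of the C workers in IEMAllCImpl.cpp.h).  A decoder can hand the whole
 * instruction to a C worker without opening an MC block:
 *
 *     FNIEMOP_DEF(iemOp_example_hlt)
 *     {
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *     }
 *
 * whereas the IEM_MC_CALL_CIMPL_* macros above already contain the return and
 * are used from within an IEM_MC_BEGIN/IEM_MC_END body after other statements
 * (address calculation, operand fetching) have been emitted.
 */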
8985
8986/**
8987 * Calls a FPU assembly implementation taking one visible argument.
8988 *
8989 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8990 * @param a0 The first extra argument.
8991 */
8992#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
8993 do { \
8994 iemFpuPrepareUsage(pIemCpu); \
8995 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
8996 } while (0)
8997
8998/**
8999 * Calls a FPU assembly implementation taking two visible arguments.
9000 *
9001 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9002 * @param a0 The first extra argument.
9003 * @param a1 The second extra argument.
9004 */
9005#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9006 do { \
9007 iemFpuPrepareUsage(pIemCpu); \
9008 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9009 } while (0)
9010
9011/**
9012 * Calls a FPU assembly implementation taking three visible arguments.
9013 *
9014 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9015 * @param a0 The first extra argument.
9016 * @param a1 The second extra argument.
9017 * @param a2 The third extra argument.
9018 */
9019#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9020 do { \
9021 iemFpuPrepareUsage(pIemCpu); \
9022 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9023 } while (0)
9024
9025#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9026 do { \
9027 (a_FpuData).FSW = (a_FSW); \
9028 (a_FpuData).r80Result = *(a_pr80Value); \
9029 } while (0)
9030
9031/** Pushes FPU result onto the stack. */
9032#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9033 iemFpuPushResult(pIemCpu, &a_FpuData)
9034/** Pushes FPU result onto the stack and sets the FPUDP. */
9035#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9036 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9037
9038/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
9039#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9040 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9041
9042/** Stores FPU result in a stack register. */
9043#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9044 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9045/** Stores FPU result in a stack register and pops the stack. */
9046#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9047 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9048/** Stores FPU result in a stack register and sets the FPUDP. */
9049#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9050 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9051/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9052 * stack. */
9053#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9054 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9055
9056/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9057#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9058 iemFpuUpdateOpcodeAndIp(pIemCpu)
9059/** Free a stack register (for FFREE and FFREEP). */
9060#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9061 iemFpuStackFree(pIemCpu, a_iStReg)
9062/** Increment the FPU stack pointer. */
9063#define IEM_MC_FPU_STACK_INC_TOP() \
9064 iemFpuStackIncTop(pIemCpu)
9065/** Decrement the FPU stack pointer. */
9066#define IEM_MC_FPU_STACK_DEC_TOP() \
9067 iemFpuStackDecTop(pIemCpu)
9068
9069/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9070#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9071 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9072/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9073#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9074 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9075/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9076#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9077 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9078/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9079#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9080 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9081/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9082 * stack. */
9083#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9084 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9085/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9086#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9087 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9088
9089/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9090#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9091 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9092/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9093 * stack. */
9094#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9095 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9096/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9097 * FPUDS. */
9098#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9099 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9100/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9101 * FPUDS. Pops stack. */
9102#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9103 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9104/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9105 * stack twice. */
9106#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9107 iemFpuStackUnderflowThenPopPop(pIemCpu)
9108/** Raises a FPU stack underflow exception for an instruction pushing a result
9109 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9110#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9111 iemFpuStackPushUnderflow(pIemCpu)
9112/** Raises a FPU stack underflow exception for an instruction pushing a result
9113 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9114#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9115 iemFpuStackPushUnderflowTwo(pIemCpu)
9116
9117/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9118 * FPUIP, FPUCS and FOP. */
9119#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9120 iemFpuStackPushOverflow(pIemCpu)
9121/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9122 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9123#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9124 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9125/** Indicates that we (might) have modified the FPU state. */
9126#define IEM_MC_USED_FPU() \
9127 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9128
9129/**
9130 * Calls a MMX assembly implementation taking two visible arguments.
9131 *
9132 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9133 * @param a0 The first extra argument.
9134 * @param a1 The second extra argument.
9135 */
9136#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9137 do { \
9138 iemFpuPrepareUsage(pIemCpu); \
9139 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9140 } while (0)
9141
9142/**
9143 * Calls a MMX assembly implementation taking three visible arguments.
9144 *
9145 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9146 * @param a0 The first extra argument.
9147 * @param a1 The second extra argument.
9148 * @param a2 The third extra argument.
9149 */
9150#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9151 do { \
9152 iemFpuPrepareUsage(pIemCpu); \
9153 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9154 } while (0)
9155
9156
9157/**
9158 * Calls a SSE assembly implementation taking two visible arguments.
9159 *
9160 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9161 * @param a0 The first extra argument.
9162 * @param a1 The second extra argument.
9163 */
9164#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9165 do { \
9166 iemFpuPrepareUsageSse(pIemCpu); \
9167 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9168 } while (0)
9169
9170/**
9171 * Calls a SSE assembly implementation taking three visible arguments.
9172 *
9173 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9174 * @param a0 The first extra argument.
9175 * @param a1 The second extra argument.
9176 * @param a2 The third extra argument.
9177 */
9178#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9179 do { \
9180 iemFpuPrepareUsageSse(pIemCpu); \
9181 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9182 } while (0)
9183
9184
9185/** @note Not for IOPL or IF testing. */
9186#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9187/** @note Not for IOPL or IF testing. */
9188#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9189/** @note Not for IOPL or IF testing. */
9190#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9191/** @note Not for IOPL or IF testing. */
9192#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9193/** @note Not for IOPL or IF testing. */
9194#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9195 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9196 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9197/** @note Not for IOPL or IF testing. */
9198#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9199 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9200 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9201/** @note Not for IOPL or IF testing. */
9202#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9203 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9204 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9205 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9206/** @note Not for IOPL or IF testing. */
9207#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9208 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9209 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9210 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9211#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9212#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9213#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9214/** @note Not for IOPL or IF testing. */
9215#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9216 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9217 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9218/** @note Not for IOPL or IF testing. */
9219#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9220 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9221 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9222/** @note Not for IOPL or IF testing. */
9223#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9224 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9225 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9226/** @note Not for IOPL or IF testing. */
9227#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9228 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9229 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9230/** @note Not for IOPL or IF testing. */
9231#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9232 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9233 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9234/** @note Not for IOPL or IF testing. */
9235#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9236 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9237 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9238#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9239#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9240#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9241 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9242#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9243 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9244#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9245 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9246#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9247 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9248#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9249 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9250#define IEM_MC_IF_FCW_IM() \
9251 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9252
9253#define IEM_MC_ELSE() } else {
9254#define IEM_MC_ENDIF() } do {} while (0)
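/* Illustrative sketch only: the conditional macros above supply the braces
 * themselves (IEM_MC_IF_* opens a block, IEM_MC_ELSE closes and reopens it,
 * IEM_MC_ENDIF closes it), so they are written without explicit braces inside
 * an IEM_MC_BEGIN/IEM_MC_END block.  IEM_MC_STORE_GREG_U16 is assumed from the
 * MC store macros defined earlier in this file:
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF)
 *         IEM_MC_STORE_GREG_U16(iGReg, u16Value);
 *     IEM_MC_ELSE()
 *         IEM_MC_STORE_GREG_U16(iGReg, 0);
 *     IEM_MC_ENDIF();
 *     IEM_MC_ADVANCE_RIP();
 */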
9255
9256/** @} */
9257
9258
9259/** @name Opcode Debug Helpers.
9260 * @{
9261 */
9262#ifdef DEBUG
9263# define IEMOP_MNEMONIC(a_szMnemonic) \
9264 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9265 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9266# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9267 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9268 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9269#else
9270# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9271# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9272#endif
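/* For reference, with a DEBUG build and level 4 logging enabled the macros above
 * emit decode lines of roughly this (illustrative) shape, following the Log4
 * format strings used in their definitions:
 *     decode - 0008:0000000080123456 lock cmpxchg Eb,Gb [#42]
 */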
9273
9274/** @} */
9275
9276
9277/** @name Opcode Helpers.
9278 * @{
9279 */
9280
9281/** The instruction raises an \#UD in real and V8086 mode. */
9282#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9283 do \
9284 { \
9285 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9286 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9287 } while (0)
9288
9289/** The instruction allows no lock prefixing (in this encoding), raises \#UD if
9290 * lock prefixed.
9291 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9292#define IEMOP_HLP_NO_LOCK_PREFIX() \
9293 do \
9294 { \
9295 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9296 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9297 } while (0)
9298
9299/** The instruction is not available in 64-bit mode, raises \#UD if we're in
9300 * 64-bit mode. */
9301#define IEMOP_HLP_NO_64BIT() \
9302 do \
9303 { \
9304 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9305 return IEMOP_RAISE_INVALID_OPCODE(); \
9306 } while (0)
9307
9308/** The instruction is only available in 64-bit mode, raises \#UD if we're not in
9309 * 64-bit mode. */
9310#define IEMOP_HLP_ONLY_64BIT() \
9311 do \
9312 { \
9313 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9314 return IEMOP_RAISE_INVALID_OPCODE(); \
9315 } while (0)
9316
9317/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9318#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9319 do \
9320 { \
9321 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9322 iemRecalEffOpSize64Default(pIemCpu); \
9323 } while (0)
9324
9325/** The instruction has 64-bit operand size if 64-bit mode. */
9326#define IEMOP_HLP_64BIT_OP_SIZE() \
9327 do \
9328 { \
9329 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9330 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9331 } while (0)
9332
9333/** Only a REX prefix immediately preceding the first opcode byte takes
9334 * effect. This macro helps ensure this as well as logging bad guest code. */
9335#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9336 do \
9337 { \
9338 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9339 { \
9340            Log5((a_szPrf ": Overriding REX prefix at %RGv! fPrefixes=%#x\n", \
9341 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9342 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9343 pIemCpu->uRexB = 0; \
9344 pIemCpu->uRexIndex = 0; \
9345 pIemCpu->uRexReg = 0; \
9346 iemRecalEffOpSize(pIemCpu); \
9347 } \
9348 } while (0)
9349
9350/**
9351 * Done decoding.
9352 */
9353#define IEMOP_HLP_DONE_DECODING() \
9354 do \
9355 { \
9356 /*nothing for now, maybe later... */ \
9357 } while (0)
9358
9359/**
9360 * Done decoding, raise \#UD exception if lock prefix present.
9361 */
9362#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9363 do \
9364 { \
9365 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9366 { /* likely */ } \
9367 else \
9368 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9369 } while (0)
9370#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9371 do \
9372 { \
9373 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9374 { /* likely */ } \
9375 else \
9376 { \
9377 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9378 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9379 } \
9380 } while (0)
9381#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9382 do \
9383 { \
9384 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9385 { /* likely */ } \
9386 else \
9387 { \
9388 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9389 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9390 } \
9391 } while (0)
9392/**
9393 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9394 * are present.
9395 */
9396#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9397 do \
9398 { \
9399 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9400 { /* likely */ } \
9401 else \
9402 return IEMOP_RAISE_INVALID_OPCODE(); \
9403 } while (0)
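/* Illustrative sketch only: a typical opcode decoder combines the mnemonic and
 * helper macros above roughly like this.  FNIEMOP_DEF and the IEM_MC_* block
 * macros are assumed from their definitions elsewhere in the IEM sources:
 *
 *     FNIEMOP_DEF(iemOp_example)
 *     {
 *         IEMOP_MNEMONIC("example");
 *         IEMOP_HLP_NO_64BIT();                        // not valid in long mode
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();    // reject a lock prefix
 *         IEM_MC_BEGIN(0, 0);
 *         IEM_MC_ADVANCE_RIP();
 *         IEM_MC_END();
 *         return VINF_SUCCESS;
 *     }
 */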
9404
9405
9406/**
9407 * Calculates the effective address of a ModR/M memory operand.
9408 *
9409 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9410 *
9411 * @return Strict VBox status code.
9412 * @param pIemCpu The IEM per CPU data.
9413 * @param bRm The ModRM byte.
9414 * @param cbImm The size of any immediate following the
9415 * effective address opcode bytes. Important for
9416 * RIP relative addressing.
9417 * @param pGCPtrEff Where to return the effective address.
9418 */
9419IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9420{
9421 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9422 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9423#define SET_SS_DEF() \
9424 do \
9425 { \
9426 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9427 pIemCpu->iEffSeg = X86_SREG_SS; \
9428 } while (0)
9429
9430 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9431 {
9432/** @todo Check the effective address size crap! */
9433 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9434 {
9435 uint16_t u16EffAddr;
9436
9437 /* Handle the disp16 form with no registers first. */
9438 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9439 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9440 else
9441 {
9442                /* Get the displacement. */
9443 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9444 {
9445 case 0: u16EffAddr = 0; break;
9446 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9447 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9448 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9449 }
9450
9451 /* Add the base and index registers to the disp. */
9452 switch (bRm & X86_MODRM_RM_MASK)
9453 {
9454 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9455 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9456 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9457 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9458 case 4: u16EffAddr += pCtx->si; break;
9459 case 5: u16EffAddr += pCtx->di; break;
9460 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9461 case 7: u16EffAddr += pCtx->bx; break;
9462 }
9463 }
9464
9465 *pGCPtrEff = u16EffAddr;
9466 }
9467 else
9468 {
9469 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9470 uint32_t u32EffAddr;
9471
9472 /* Handle the disp32 form with no registers first. */
9473 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9474 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9475 else
9476 {
9477 /* Get the register (or SIB) value. */
9478 switch ((bRm & X86_MODRM_RM_MASK))
9479 {
9480 case 0: u32EffAddr = pCtx->eax; break;
9481 case 1: u32EffAddr = pCtx->ecx; break;
9482 case 2: u32EffAddr = pCtx->edx; break;
9483 case 3: u32EffAddr = pCtx->ebx; break;
9484 case 4: /* SIB */
9485 {
9486 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9487
9488 /* Get the index and scale it. */
9489 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9490 {
9491 case 0: u32EffAddr = pCtx->eax; break;
9492 case 1: u32EffAddr = pCtx->ecx; break;
9493 case 2: u32EffAddr = pCtx->edx; break;
9494 case 3: u32EffAddr = pCtx->ebx; break;
9495 case 4: u32EffAddr = 0; /*none */ break;
9496 case 5: u32EffAddr = pCtx->ebp; break;
9497 case 6: u32EffAddr = pCtx->esi; break;
9498 case 7: u32EffAddr = pCtx->edi; break;
9499 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9500 }
9501 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9502
9503 /* add base */
9504 switch (bSib & X86_SIB_BASE_MASK)
9505 {
9506 case 0: u32EffAddr += pCtx->eax; break;
9507 case 1: u32EffAddr += pCtx->ecx; break;
9508 case 2: u32EffAddr += pCtx->edx; break;
9509 case 3: u32EffAddr += pCtx->ebx; break;
9510 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9511 case 5:
9512 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9513 {
9514 u32EffAddr += pCtx->ebp;
9515 SET_SS_DEF();
9516 }
9517 else
9518 {
9519 uint32_t u32Disp;
9520 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9521 u32EffAddr += u32Disp;
9522 }
9523 break;
9524 case 6: u32EffAddr += pCtx->esi; break;
9525 case 7: u32EffAddr += pCtx->edi; break;
9526 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9527 }
9528 break;
9529 }
9530 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9531 case 6: u32EffAddr = pCtx->esi; break;
9532 case 7: u32EffAddr = pCtx->edi; break;
9533 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9534 }
9535
9536 /* Get and add the displacement. */
9537 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9538 {
9539 case 0:
9540 break;
9541 case 1:
9542 {
9543 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9544 u32EffAddr += i8Disp;
9545 break;
9546 }
9547 case 2:
9548 {
9549 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9550 u32EffAddr += u32Disp;
9551 break;
9552 }
9553 default:
9554 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9555 }
9556
9557 }
9558 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9559 *pGCPtrEff = u32EffAddr;
9560 else
9561 {
9562 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9563 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9564 }
9565 }
9566 }
9567 else
9568 {
9569 uint64_t u64EffAddr;
9570
9571 /* Handle the rip+disp32 form with no registers first. */
9572 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9573 {
9574 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9575 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9576 }
9577 else
9578 {
9579 /* Get the register (or SIB) value. */
9580 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9581 {
9582 case 0: u64EffAddr = pCtx->rax; break;
9583 case 1: u64EffAddr = pCtx->rcx; break;
9584 case 2: u64EffAddr = pCtx->rdx; break;
9585 case 3: u64EffAddr = pCtx->rbx; break;
9586 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9587 case 6: u64EffAddr = pCtx->rsi; break;
9588 case 7: u64EffAddr = pCtx->rdi; break;
9589 case 8: u64EffAddr = pCtx->r8; break;
9590 case 9: u64EffAddr = pCtx->r9; break;
9591 case 10: u64EffAddr = pCtx->r10; break;
9592 case 11: u64EffAddr = pCtx->r11; break;
9593 case 13: u64EffAddr = pCtx->r13; break;
9594 case 14: u64EffAddr = pCtx->r14; break;
9595 case 15: u64EffAddr = pCtx->r15; break;
9596 /* SIB */
9597 case 4:
9598 case 12:
9599 {
9600 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9601
9602 /* Get the index and scale it. */
9603 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9604 {
9605 case 0: u64EffAddr = pCtx->rax; break;
9606 case 1: u64EffAddr = pCtx->rcx; break;
9607 case 2: u64EffAddr = pCtx->rdx; break;
9608 case 3: u64EffAddr = pCtx->rbx; break;
9609 case 4: u64EffAddr = 0; /*none */ break;
9610 case 5: u64EffAddr = pCtx->rbp; break;
9611 case 6: u64EffAddr = pCtx->rsi; break;
9612 case 7: u64EffAddr = pCtx->rdi; break;
9613 case 8: u64EffAddr = pCtx->r8; break;
9614 case 9: u64EffAddr = pCtx->r9; break;
9615 case 10: u64EffAddr = pCtx->r10; break;
9616 case 11: u64EffAddr = pCtx->r11; break;
9617 case 12: u64EffAddr = pCtx->r12; break;
9618 case 13: u64EffAddr = pCtx->r13; break;
9619 case 14: u64EffAddr = pCtx->r14; break;
9620 case 15: u64EffAddr = pCtx->r15; break;
9621 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9622 }
9623 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9624
9625 /* add base */
9626 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9627 {
9628 case 0: u64EffAddr += pCtx->rax; break;
9629 case 1: u64EffAddr += pCtx->rcx; break;
9630 case 2: u64EffAddr += pCtx->rdx; break;
9631 case 3: u64EffAddr += pCtx->rbx; break;
9632 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9633 case 6: u64EffAddr += pCtx->rsi; break;
9634 case 7: u64EffAddr += pCtx->rdi; break;
9635 case 8: u64EffAddr += pCtx->r8; break;
9636 case 9: u64EffAddr += pCtx->r9; break;
9637 case 10: u64EffAddr += pCtx->r10; break;
9638 case 11: u64EffAddr += pCtx->r11; break;
9639 case 12: u64EffAddr += pCtx->r12; break;
9640 case 14: u64EffAddr += pCtx->r14; break;
9641 case 15: u64EffAddr += pCtx->r15; break;
9642 /* complicated encodings */
9643 case 5:
9644 case 13:
9645 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9646 {
9647 if (!pIemCpu->uRexB)
9648 {
9649 u64EffAddr += pCtx->rbp;
9650 SET_SS_DEF();
9651 }
9652 else
9653 u64EffAddr += pCtx->r13;
9654 }
9655 else
9656 {
9657 uint32_t u32Disp;
9658 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9659 u64EffAddr += (int32_t)u32Disp;
9660 }
9661 break;
9662 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9663 }
9664 break;
9665 }
9666 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9667 }
9668
9669 /* Get and add the displacement. */
9670 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9671 {
9672 case 0:
9673 break;
9674 case 1:
9675 {
9676 int8_t i8Disp;
9677 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9678 u64EffAddr += i8Disp;
9679 break;
9680 }
9681 case 2:
9682 {
9683 uint32_t u32Disp;
9684 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9685 u64EffAddr += (int32_t)u32Disp;
9686 break;
9687 }
9688 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9689 }
9690
9691 }
9692
9693 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9694 *pGCPtrEff = u64EffAddr;
9695 else
9696 {
9697 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9698 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9699 }
9700 }
9701
9702 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9703 return VINF_SUCCESS;
9704}
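/* Worked example (illustrative): with a 16-bit effective address size, a ModR/M
 * byte of 0x42 decodes as mod=01 rm=010, i.e. [bp+si+disp8].  Given bp=0x1000,
 * si=0x0020 and a disp8 of 0x05, the function above returns *pGCPtrEff = 0x1025,
 * and SET_SS_DEF() makes SS the default segment because BP is involved and no
 * segment override prefix was decoded. */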
9705
9706/** @} */
9707
9708
9709
9710/*
9711 * Include the instructions
9712 */
9713#include "IEMAllInstructions.cpp.h"
9714
9715
9716
9717
9718#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9719
9720/**
9721 * Sets up execution verification mode.
9722 */
9723IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9724{
9725 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9726 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9727
9728 /*
9729 * Always note down the address of the current instruction.
9730 */
9731 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9732 pIemCpu->uOldRip = pOrgCtx->rip;
9733
9734 /*
9735 * Enable verification and/or logging.
9736 */
9737    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9738 if ( fNewNoRem
9739 && ( 0
9740#if 0 /* auto enable on first paged protected mode interrupt */
9741 || ( pOrgCtx->eflags.Bits.u1IF
9742 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9743 && TRPMHasTrap(pVCpu)
9744 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9745#endif
9746#if 0
9747             || (   pOrgCtx->cs.Sel == 0x10
9748                 && (   pOrgCtx->rip == 0x90119e3e
9749                     || pOrgCtx->rip == 0x901d9810) )
9750#endif
9751#if 0 /* Auto enable DSL - FPU stuff. */
9752             || (   pOrgCtx->cs.Sel == 0x10
9753 && (// pOrgCtx->rip == 0xc02ec07f
9754 //|| pOrgCtx->rip == 0xc02ec082
9755 //|| pOrgCtx->rip == 0xc02ec0c9
9756 0
9757 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9758#endif
9759#if 0 /* Auto enable DSL - fstp st0 stuff. */
9760             || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9761#endif
9762#if 0
9763 || pOrgCtx->rip == 0x9022bb3a
9764#endif
9765#if 0
9766 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9767#endif
9768#if 0
9769 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9770 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9771#endif
9772#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9773 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9774 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9775 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9776#endif
9777#if 0 /* NT4SP1 - xadd early boot. */
9778 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9779#endif
9780#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9781 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9782#endif
9783#if 0 /* NT4SP1 - cmpxchg (AMD). */
9784 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9785#endif
9786#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9787 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9788#endif
9789#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9790 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9791
9792#endif
9793#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9794 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9795
9796#endif
9797#if 0 /* NT4SP1 - frstor [ecx] */
9798 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9799#endif
9800#if 0 /* xxxxxx - All long mode code. */
9801 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9802#endif
9803#if 0 /* rep movsq linux 3.7 64-bit boot. */
9804 || (pOrgCtx->rip == 0x0000000000100241)
9805#endif
9806#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9807 || (pOrgCtx->rip == 0x000000000215e240)
9808#endif
9809#if 0 /* DOS's size-overridden iret to v8086. */
9810 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9811#endif
9812 )
9813 )
9814 {
9815 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9816 RTLogFlags(NULL, "enabled");
9817 fNewNoRem = false;
9818 }
9819 if (fNewNoRem != pIemCpu->fNoRem)
9820 {
9821 pIemCpu->fNoRem = fNewNoRem;
9822 if (!fNewNoRem)
9823 {
9824 LogAlways(("Enabling verification mode!\n"));
9825 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9826 }
9827 else
9828 LogAlways(("Disabling verification mode!\n"));
9829 }
9830
9831 /*
9832 * Switch state.
9833 */
9834 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9835 {
9836 static CPUMCTX s_DebugCtx; /* Ugly! */
9837
9838 s_DebugCtx = *pOrgCtx;
9839 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9840 }
9841
9842 /*
9843 * See if there is an interrupt pending in TRPM and inject it if we can.
9844 */
9845 pIemCpu->uInjectCpl = UINT8_MAX;
9846 if ( pOrgCtx->eflags.Bits.u1IF
9847 && TRPMHasTrap(pVCpu)
9848 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9849 {
9850 uint8_t u8TrapNo;
9851 TRPMEVENT enmType;
9852 RTGCUINT uErrCode;
9853 RTGCPTR uCr2;
9854 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9855 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9856 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9857 TRPMResetTrap(pVCpu);
9858 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9859 }
9860
9861 /*
9862 * Reset the counters.
9863 */
9864 pIemCpu->cIOReads = 0;
9865 pIemCpu->cIOWrites = 0;
9866 pIemCpu->fIgnoreRaxRdx = false;
9867 pIemCpu->fOverlappingMovs = false;
9868 pIemCpu->fProblematicMemory = false;
9869 pIemCpu->fUndefinedEFlags = 0;
9870
9871 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9872 {
9873 /*
9874 * Free all verification records.
9875 */
9876 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9877 pIemCpu->pIemEvtRecHead = NULL;
9878 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9879 do
9880 {
9881 while (pEvtRec)
9882 {
9883 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9884 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9885 pIemCpu->pFreeEvtRec = pEvtRec;
9886 pEvtRec = pNext;
9887 }
9888 pEvtRec = pIemCpu->pOtherEvtRecHead;
9889 pIemCpu->pOtherEvtRecHead = NULL;
9890 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9891 } while (pEvtRec);
9892 }
9893}
9894
9895
9896/**
9897 * Allocate an event record.
9898 * @returns Pointer to a record.
9899 */
9900IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9901{
9902 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9903 return NULL;
9904
9905 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9906 if (pEvtRec)
9907 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9908 else
9909 {
9910 if (!pIemCpu->ppIemEvtRecNext)
9911 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9912
9913 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9914 if (!pEvtRec)
9915 return NULL;
9916 }
9917 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9918 pEvtRec->pNext = NULL;
9919 return pEvtRec;
9920}
9921
9922
9923/**
9924 * IOMMMIORead notification.
9925 */
9926VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9927{
9928 PVMCPU pVCpu = VMMGetCpu(pVM);
9929 if (!pVCpu)
9930 return;
9931 PIEMCPU pIemCpu = &pVCpu->iem.s;
9932 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9933 if (!pEvtRec)
9934 return;
9935 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9936 pEvtRec->u.RamRead.GCPhys = GCPhys;
9937 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9938 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9939 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9940}
9941
9942
9943/**
9944 * IOMMMIOWrite notification.
9945 */
9946VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9947{
9948 PVMCPU pVCpu = VMMGetCpu(pVM);
9949 if (!pVCpu)
9950 return;
9951 PIEMCPU pIemCpu = &pVCpu->iem.s;
9952 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9953 if (!pEvtRec)
9954 return;
9955 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9956 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9957 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9958 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9959 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9960 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9961 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9962 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9963 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9964}
9965
9966
9967/**
9968 * IOMIOPortRead notification.
9969 */
9970VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9971{
9972 PVMCPU pVCpu = VMMGetCpu(pVM);
9973 if (!pVCpu)
9974 return;
9975 PIEMCPU pIemCpu = &pVCpu->iem.s;
9976 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9977 if (!pEvtRec)
9978 return;
9979 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9980 pEvtRec->u.IOPortRead.Port = Port;
9981 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9982 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9983 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9984}
9985
9986/**
9987 * IOMIOPortWrite notification.
9988 */
9989VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9990{
9991 PVMCPU pVCpu = VMMGetCpu(pVM);
9992 if (!pVCpu)
9993 return;
9994 PIEMCPU pIemCpu = &pVCpu->iem.s;
9995 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9996 if (!pEvtRec)
9997 return;
9998 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9999 pEvtRec->u.IOPortWrite.Port = Port;
10000 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10001 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10002 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10003 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10004}
10005
10006
10007VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10008{
10009 AssertFailed();
10010}
10011
10012
10013VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10014{
10015 AssertFailed();
10016}
10017
10018
10019/**
10020 * Fakes and records an I/O port read.
10021 *
10022 * @returns VINF_SUCCESS.
10023 * @param pIemCpu The IEM per CPU data.
10024 * @param Port The I/O port.
10025 * @param pu32Value Where to store the fake value.
10026 * @param cbValue The size of the access.
10027 */
10028IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10029{
10030 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10031 if (pEvtRec)
10032 {
10033 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10034 pEvtRec->u.IOPortRead.Port = Port;
10035 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
10036 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10037 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10038 }
10039 pIemCpu->cIOReads++;
10040 *pu32Value = 0xcccccccc;
10041 return VINF_SUCCESS;
10042}
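/* Example (illustrative): an "in eax, dx" emulated under verification thus always
 * loads 0xcccccccc into EAX.  This does not cause a false mismatch later because
 * the register comparison skips RAX when exactly one I/O read was recorded. */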
10043
10044
10045/**
10046 * Fakes and records an I/O port write.
10047 *
10048 * @returns VINF_SUCCESS.
10049 * @param pIemCpu The IEM per CPU data.
10050 * @param Port The I/O port.
10051 * @param u32Value The value being written.
10052 * @param cbValue The size of the access.
10053 */
10054IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10055{
10056 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10057 if (pEvtRec)
10058 {
10059 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10060 pEvtRec->u.IOPortWrite.Port = Port;
10061 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10062 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10063 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10064 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10065 }
10066 pIemCpu->cIOWrites++;
10067 return VINF_SUCCESS;
10068}
10069
10070
10071/**
10072 * Used to add extra register state and disassembly details to verification assertions.
10073 * @param pIemCpu The IEM per CPU state.
10074 */
10075IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10076{
10077 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10078 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10079 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10080 char szRegs[4096];
10081 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10082 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10083 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10084 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10085 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10086 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10087 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10088 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10089 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10090 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10091 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10092 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10093 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10094 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10095 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10096 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10097 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10098 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10099 " efer=%016VR{efer}\n"
10100 " pat=%016VR{pat}\n"
10101 " sf_mask=%016VR{sf_mask}\n"
10102 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10103 " lstar=%016VR{lstar}\n"
10104 " star=%016VR{star} cstar=%016VR{cstar}\n"
10105 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10106 );
10107
10108 char szInstr1[256];
10109 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10110 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10111 szInstr1, sizeof(szInstr1), NULL);
10112 char szInstr2[256];
10113 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10114 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10115 szInstr2, sizeof(szInstr2), NULL);
10116
10117 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10118}
10119
10120
10121/**
10122 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10123 * dump to the assertion info.
10124 *
10125 * @param pEvtRec The record to dump.
10126 */
10127IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10128{
10129 switch (pEvtRec->enmEvent)
10130 {
10131 case IEMVERIFYEVENT_IOPORT_READ:
10132 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10133                            pEvtRec->u.IOPortRead.Port,
10134                            pEvtRec->u.IOPortRead.cbValue);
10135 break;
10136 case IEMVERIFYEVENT_IOPORT_WRITE:
10137 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10138 pEvtRec->u.IOPortWrite.Port,
10139 pEvtRec->u.IOPortWrite.cbValue,
10140 pEvtRec->u.IOPortWrite.u32Value);
10141 break;
10142 case IEMVERIFYEVENT_RAM_READ:
10143 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10144 pEvtRec->u.RamRead.GCPhys,
10145 pEvtRec->u.RamRead.cb);
10146 break;
10147 case IEMVERIFYEVENT_RAM_WRITE:
10148 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10149 pEvtRec->u.RamWrite.GCPhys,
10150 pEvtRec->u.RamWrite.cb,
10151 (int)pEvtRec->u.RamWrite.cb,
10152 pEvtRec->u.RamWrite.ab);
10153 break;
10154 default:
10155 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10156 break;
10157 }
10158}
10159
10160
10161/**
10162 * Raises an assertion on the specified records, showing the given message with
10163 * dumps of both records attached.
10164 *
10165 * @param pIemCpu The IEM per CPU data.
10166 * @param pEvtRec1 The first record.
10167 * @param pEvtRec2 The second record.
10168 * @param pszMsg The message explaining why we're asserting.
10169 */
10170IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10171{
10172 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10173 iemVerifyAssertAddRecordDump(pEvtRec1);
10174 iemVerifyAssertAddRecordDump(pEvtRec2);
10175 iemVerifyAssertMsg2(pIemCpu);
10176 RTAssertPanic();
10177}
10178
10179
10180/**
10181 * Raises an assertion on the specified record, showing the given message with
10182 * a record dump attached.
10183 *
10184 * @param pIemCpu The IEM per CPU data.
10185 * @param   pEvtRec             The record to dump.
10186 * @param pszMsg The message explaining why we're asserting.
10187 */
10188IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10189{
10190 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10191 iemVerifyAssertAddRecordDump(pEvtRec);
10192 iemVerifyAssertMsg2(pIemCpu);
10193 RTAssertPanic();
10194}
10195
10196
10197/**
10198 * Verifies a write record.
10199 *
10200 * @param pIemCpu The IEM per CPU data.
10201 * @param pEvtRec The write record.
10202 * @param   fRem                Set if REM was doing the other execution.  If clear,
10203 *                              it was HM.
10204 */
10205IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10206{
10207 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10208 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10209 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10210 if ( RT_FAILURE(rc)
10211 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10212 {
10213 /* fend off ins */
10214 if ( !pIemCpu->cIOReads
10215 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10216 || ( pEvtRec->u.RamWrite.cb != 1
10217 && pEvtRec->u.RamWrite.cb != 2
10218 && pEvtRec->u.RamWrite.cb != 4) )
10219 {
10220 /* fend off ROMs and MMIO */
10221 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10222 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10223 {
10224 /* fend off fxsave */
10225 if (pEvtRec->u.RamWrite.cb != 512)
10226 {
10227 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10228 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10229                    RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
10230 RTAssertMsg2Add("%s: %.*Rhxs\n"
10231 "iem: %.*Rhxs\n",
10232 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10233 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10234 iemVerifyAssertAddRecordDump(pEvtRec);
10235 iemVerifyAssertMsg2(pIemCpu);
10236 RTAssertPanic();
10237 }
10238 }
10239 }
10240 }
10241
10242}
10243
10244/**
10245 * Performs the post-execution verification checks.
10246 */
10247IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10248{
10249 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10250 return;
10251
10252 /*
10253 * Switch back the state.
10254 */
10255 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10256 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10257 Assert(pOrgCtx != pDebugCtx);
10258 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10259
10260 /*
10261 * Execute the instruction in REM.
10262 */
10263 bool fRem = false;
10264 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10265 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10266 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10267#ifdef IEM_VERIFICATION_MODE_FULL_HM
10268 if ( HMIsEnabled(pVM)
10269 && pIemCpu->cIOReads == 0
10270 && pIemCpu->cIOWrites == 0
10271 && !pIemCpu->fProblematicMemory)
10272 {
10273 uint64_t uStartRip = pOrgCtx->rip;
10274 unsigned iLoops = 0;
10275 do
10276 {
10277 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10278 iLoops++;
10279 } while ( rc == VINF_SUCCESS
10280 || ( rc == VINF_EM_DBG_STEPPED
10281 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10282 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10283 || ( pOrgCtx->rip != pDebugCtx->rip
10284 && pIemCpu->uInjectCpl != UINT8_MAX
10285 && iLoops < 8) );
10286 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10287 rc = VINF_SUCCESS;
10288 }
10289#endif
10290 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10291 || rc == VINF_IOM_R3_IOPORT_READ
10292 || rc == VINF_IOM_R3_IOPORT_WRITE
10293 || rc == VINF_IOM_R3_MMIO_READ
10294 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10295 || rc == VINF_IOM_R3_MMIO_WRITE
10296 || rc == VINF_CPUM_R3_MSR_READ
10297 || rc == VINF_CPUM_R3_MSR_WRITE
10298 || rc == VINF_EM_RESCHEDULE
10299 )
10300 {
10301 EMRemLock(pVM);
10302 rc = REMR3EmulateInstruction(pVM, pVCpu);
10303 AssertRC(rc);
10304 EMRemUnlock(pVM);
10305 fRem = true;
10306 }
10307
10308 /*
10309 * Compare the register states.
10310 */
10311 unsigned cDiffs = 0;
10312 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10313 {
10314 //Log(("REM and IEM ends up with different registers!\n"));
10315 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10316
10317# define CHECK_FIELD(a_Field) \
10318 do \
10319 { \
10320 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10321 { \
10322 switch (sizeof(pOrgCtx->a_Field)) \
10323 { \
10324 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10325 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10326 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10327 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10328 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10329 } \
10330 cDiffs++; \
10331 } \
10332 } while (0)
10333# define CHECK_XSTATE_FIELD(a_Field) \
10334 do \
10335 { \
10336 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10337 { \
10338 switch (sizeof(pOrgCtx->a_Field)) \
10339 { \
10340 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10341 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10342 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10343 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10344 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10345 } \
10346 cDiffs++; \
10347 } \
10348 } while (0)
10349
10350# define CHECK_BIT_FIELD(a_Field) \
10351 do \
10352 { \
10353 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10354 { \
10355 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10356 cDiffs++; \
10357 } \
10358 } while (0)
10359
10360# define CHECK_SEL(a_Sel) \
10361 do \
10362 { \
10363 CHECK_FIELD(a_Sel.Sel); \
10364 CHECK_FIELD(a_Sel.Attr.u); \
10365 CHECK_FIELD(a_Sel.u64Base); \
10366 CHECK_FIELD(a_Sel.u32Limit); \
10367 CHECK_FIELD(a_Sel.fFlags); \
10368 } while (0)
10369
10370 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10371 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10372
10373#if 1 /* The recompiler doesn't update these the intel way. */
10374 if (fRem)
10375 {
10376 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10377 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10378 pOrgXState->x87.CS = pDebugXState->x87.CS;
10379 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10380 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10381 pOrgXState->x87.DS = pDebugXState->x87.DS;
10382 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10383 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10384 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10385 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10386 }
10387#endif
10388 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10389 {
10390 RTAssertMsg2Weak(" the FPU state differs\n");
10391 cDiffs++;
10392 CHECK_XSTATE_FIELD(x87.FCW);
10393 CHECK_XSTATE_FIELD(x87.FSW);
10394 CHECK_XSTATE_FIELD(x87.FTW);
10395 CHECK_XSTATE_FIELD(x87.FOP);
10396 CHECK_XSTATE_FIELD(x87.FPUIP);
10397 CHECK_XSTATE_FIELD(x87.CS);
10398 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10399 CHECK_XSTATE_FIELD(x87.FPUDP);
10400 CHECK_XSTATE_FIELD(x87.DS);
10401 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10402 CHECK_XSTATE_FIELD(x87.MXCSR);
10403 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10404 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10405 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10406 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10407 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10408 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10409 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10410 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10411 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10412 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10413 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10414 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10415 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10416 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10417 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10418 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10419 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10420 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10421 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10422 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10423 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10424 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10425 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10426 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10427 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10428 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10429 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10430 }
10431 CHECK_FIELD(rip);
10432 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10433 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10434 {
10435 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10436 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10437 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10438 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10439 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10440 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10441 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10442 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10443 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10444 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10445 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10446 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10447 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10448 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10449 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10450 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10451        if (0 && !fRem) /** @todo debug the occasionally cleared RF flag when running against VT-x. */
10452 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10453 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10454 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10455 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10456 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10457 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10458 }
10459
10460 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10461 CHECK_FIELD(rax);
10462 CHECK_FIELD(rcx);
10463 if (!pIemCpu->fIgnoreRaxRdx)
10464 CHECK_FIELD(rdx);
10465 CHECK_FIELD(rbx);
10466 CHECK_FIELD(rsp);
10467 CHECK_FIELD(rbp);
10468 CHECK_FIELD(rsi);
10469 CHECK_FIELD(rdi);
10470 CHECK_FIELD(r8);
10471 CHECK_FIELD(r9);
10472 CHECK_FIELD(r10);
10473 CHECK_FIELD(r11);
10474 CHECK_FIELD(r12);
10475 CHECK_FIELD(r13);
10476 CHECK_SEL(cs);
10477 CHECK_SEL(ss);
10478 CHECK_SEL(ds);
10479 CHECK_SEL(es);
10480 CHECK_SEL(fs);
10481 CHECK_SEL(gs);
10482 CHECK_FIELD(cr0);
10483
10484        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10485           the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10486        /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
10487           while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10488 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10489 {
10490 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10491 { /* ignore */ }
10492 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10493 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10494 && fRem)
10495 { /* ignore */ }
10496 else
10497 CHECK_FIELD(cr2);
10498 }
10499 CHECK_FIELD(cr3);
10500 CHECK_FIELD(cr4);
10501 CHECK_FIELD(dr[0]);
10502 CHECK_FIELD(dr[1]);
10503 CHECK_FIELD(dr[2]);
10504 CHECK_FIELD(dr[3]);
10505 CHECK_FIELD(dr[6]);
10506 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10507 CHECK_FIELD(dr[7]);
10508 CHECK_FIELD(gdtr.cbGdt);
10509 CHECK_FIELD(gdtr.pGdt);
10510 CHECK_FIELD(idtr.cbIdt);
10511 CHECK_FIELD(idtr.pIdt);
10512 CHECK_SEL(ldtr);
10513 CHECK_SEL(tr);
10514 CHECK_FIELD(SysEnter.cs);
10515 CHECK_FIELD(SysEnter.eip);
10516 CHECK_FIELD(SysEnter.esp);
10517 CHECK_FIELD(msrEFER);
10518 CHECK_FIELD(msrSTAR);
10519 CHECK_FIELD(msrPAT);
10520 CHECK_FIELD(msrLSTAR);
10521 CHECK_FIELD(msrCSTAR);
10522 CHECK_FIELD(msrSFMASK);
10523 CHECK_FIELD(msrKERNELGSBASE);
10524
10525 if (cDiffs != 0)
10526 {
10527 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10528 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10529 iemVerifyAssertMsg2(pIemCpu);
10530 RTAssertPanic();
10531 }
10532# undef CHECK_FIELD
10533# undef CHECK_BIT_FIELD
10534 }
10535
10536 /*
10537 * If the register state compared fine, check the verification event
10538 * records.
10539 */
10540 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10541 {
10542 /*
10543         * Compare verification event records.
10544 * - I/O port accesses should be a 1:1 match.
10545 */
10546 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10547 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10548 while (pIemRec && pOtherRec)
10549 {
10550            /* Since we might miss RAM writes and reads, ignore reads and verify
10551               that any extra written memory matches what is actually in guest RAM. */
10552 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10553 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10554 && pIemRec->pNext)
10555 {
10556 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10557 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10558 pIemRec = pIemRec->pNext;
10559 }
10560
10561 /* Do the compare. */
10562 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10563 {
10564 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10565 break;
10566 }
10567 bool fEquals;
10568 switch (pIemRec->enmEvent)
10569 {
10570 case IEMVERIFYEVENT_IOPORT_READ:
10571 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10572 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10573 break;
10574 case IEMVERIFYEVENT_IOPORT_WRITE:
10575 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10576 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10577 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10578 break;
10579 case IEMVERIFYEVENT_RAM_READ:
10580 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10581 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10582 break;
10583 case IEMVERIFYEVENT_RAM_WRITE:
10584 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10585 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10586 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10587 break;
10588 default:
10589 fEquals = false;
10590 break;
10591 }
10592 if (!fEquals)
10593 {
10594 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10595 break;
10596 }
10597
10598 /* advance */
10599 pIemRec = pIemRec->pNext;
10600 pOtherRec = pOtherRec->pNext;
10601 }
10602
10603 /* Ignore extra writes and reads. */
10604 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10605 {
10606 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10607 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10608 pIemRec = pIemRec->pNext;
10609 }
10610 if (pIemRec != NULL)
10611 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10612 else if (pOtherRec != NULL)
10613 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10614 }
10615 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10616}
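/* For reference, a register mismatch found above is reported through the CHECK_*
 * macros in lines of roughly this (illustrative) shape before the assertion fires:
 *          rax differs - iem=0000000000000001 - rem=0000000000000000
 */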
10617
10618#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10619
10620/* stubs */
10621IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10622{
10623 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10624 return VERR_INTERNAL_ERROR;
10625}
10626
10627IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10628{
10629 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10630 return VERR_INTERNAL_ERROR;
10631}
10632
10633#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10634
10635
10636#ifdef LOG_ENABLED
10637/**
10638 * Logs the current instruction.
10639 * @param pVCpu The cross context virtual CPU structure of the caller.
10640 * @param pCtx The current CPU context.
10641 * @param fSameCtx Set if we have the same context information as the VMM,
10642 * clear if we may have already executed an instruction in
10643 * our debug context. When clear, we assume IEMCPU holds
10644 * valid CPU mode info.
10645 */
10646IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10647{
10648# ifdef IN_RING3
10649 if (LogIs2Enabled())
10650 {
10651 char szInstr[256];
10652 uint32_t cbInstr = 0;
10653 if (fSameCtx)
10654 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10655 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10656 szInstr, sizeof(szInstr), &cbInstr);
10657 else
10658 {
10659 uint32_t fFlags = 0;
10660 switch (pVCpu->iem.s.enmCpuMode)
10661 {
10662 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10663 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10664 case IEMMODE_16BIT:
10665 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10666 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10667 else
10668 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10669 break;
10670 }
10671 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10672 szInstr, sizeof(szInstr), &cbInstr);
10673 }
10674
10675 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10676 Log2(("****\n"
10677 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10678 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10679 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10680 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10681 " %s\n"
10682 ,
10683 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10684 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10685 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10686 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10687 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10688 szInstr));
10689
10690 if (LogIs3Enabled())
10691 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10692 }
10693 else
10694# endif
10695 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10696 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10697}
10698#endif
10699
10700
10701/**
10702 * Makes status code adjustments (pass up from I/O and access handlers)
10703 * as well as maintaining statistics.
10704 *
10705 * @returns Strict VBox status code to pass up.
10706 * @param pIemCpu The IEM per CPU data.
10707 * @param rcStrict The status from executing an instruction.
10708 */
10709DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10710{
10711 if (rcStrict != VINF_SUCCESS)
10712 {
10713 if (RT_SUCCESS(rcStrict))
10714 {
10715 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10716 || rcStrict == VINF_IOM_R3_IOPORT_READ
10717 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10718 || rcStrict == VINF_IOM_R3_MMIO_READ
10719 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10720 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10721 || rcStrict == VINF_CPUM_R3_MSR_READ
10722 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10723 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10724 || rcStrict == VINF_EM_RAW_TO_R3
10725 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10726 /* raw-mode / virt handlers only: */
10727 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10728 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10729 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10730 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10731 || rcStrict == VINF_SELM_SYNC_GDT
10732 || rcStrict == VINF_CSAM_PENDING_ACTION
10733 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10734 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10735/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10736 int32_t const rcPassUp = pIemCpu->rcPassUp;
10737 if (rcPassUp == VINF_SUCCESS)
10738 pIemCpu->cRetInfStatuses++;
10739 else if ( rcPassUp < VINF_EM_FIRST
10740 || rcPassUp > VINF_EM_LAST
10741 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10742 {
10743 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10744 pIemCpu->cRetPassUpStatus++;
10745 rcStrict = rcPassUp;
10746 }
10747 else
10748 {
10749 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10750 pIemCpu->cRetInfStatuses++;
10751 }
10752 }
10753 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10754 pIemCpu->cRetAspectNotImplemented++;
10755 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10756 pIemCpu->cRetInstrNotImplemented++;
10757#ifdef IEM_VERIFICATION_MODE_FULL
10758 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10759 rcStrict = VINF_SUCCESS;
10760#endif
10761 else
10762 pIemCpu->cRetErrStatuses++;
10763 }
10764 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10765 {
10766 pIemCpu->cRetPassUpStatus++;
10767 rcStrict = pIemCpu->rcPassUp;
10768 }
10769
10770 return rcStrict;
10771}
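/* Example (illustrative): if the instruction body itself returned VINF_SUCCESS but
 * an access handler noted a deferral in pIemCpu->rcPassUp, the function above
 * returns that pass-up status instead, so the caller sees the pending ring-3
 * request rather than plain success. */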
10772
10773
10774/**
10775 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10776 * IEMExecOneWithPrefetchedByPC.
10777 *
10778 * @return Strict VBox status code.
10779 * @param pVCpu The current virtual CPU.
10780 * @param pIemCpu The IEM per CPU data.
10781 * @param fExecuteInhibit If set, execute the instruction following CLI,
10782 * POP SS and MOV SS,GR.
10783 */
10784DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10785{
10786 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10787 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10788 if (rcStrict == VINF_SUCCESS)
10789 pIemCpu->cInstructions++;
10790 if (pIemCpu->cActiveMappings > 0)
10791 iemMemRollback(pIemCpu);
10792//#ifdef DEBUG
10793// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10794//#endif
10795
10796 /* Execute the next instruction as well if a cli, pop ss or
10797 mov ss, Gr has just completed successfully. */
10798 if ( fExecuteInhibit
10799 && rcStrict == VINF_SUCCESS
10800 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10801 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10802 {
10803 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10804 if (rcStrict == VINF_SUCCESS)
10805 {
10806# ifdef LOG_ENABLED
10807 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10808# endif
10809 IEM_OPCODE_GET_NEXT_U8(&b);
10810 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10811 if (rcStrict == VINF_SUCCESS)
10812 pIemCpu->cInstructions++;
10813 if (pIemCpu->cActiveMappings > 0)
10814 iemMemRollback(pIemCpu);
10815 }
10816 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10817 }
10818
10819 /*
10820 * Return value fiddling, statistics and sanity assertions.
10821 */
10822 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10823
10824 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10825 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10826#if defined(IEM_VERIFICATION_MODE_FULL)
10827 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10828 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10829 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10830 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10831#endif
10832 return rcStrict;
10833}
10834
10835
10836#ifdef IN_RC
10837/**
10838 * Re-enters raw-mode or ensures we return to ring-3.
10839 *
10840 * @returns rcStrict, maybe modified.
10841 * @param pIemCpu The IEM CPU structure.
10842 * @param pVCpu The cross context virtual CPU structure of the caller.
10843 * @param pCtx The current CPU context.
10844 * @param rcStrict The status code returned by the interpreter.
10845 */
10846DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10847{
10848 if (!pIemCpu->fInPatchCode)
10849 CPUMRawEnter(pVCpu);
10850 return rcStrict;
10851}
10852#endif
10853
10854
10855/**
10856 * Execute one instruction.
10857 *
10858 * @return Strict VBox status code.
10859 * @param pVCpu The current virtual CPU.
10860 */
10861VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10862{
10863 PIEMCPU pIemCpu = &pVCpu->iem.s;
10864
10865#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10866 iemExecVerificationModeSetup(pIemCpu);
10867#endif
10868#ifdef LOG_ENABLED
10869 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10870 iemLogCurInstr(pVCpu, pCtx, true);
10871#endif
10872
10873 /*
10874 * Do the decoding and emulation.
10875 */
10876 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10877 if (rcStrict == VINF_SUCCESS)
10878 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10879
10880#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10881 /*
10882 * Assert some sanity.
10883 */
10884 iemExecVerificationModeCheck(pIemCpu);
10885#endif
10886#ifdef IN_RC
10887 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10888#endif
10889 if (rcStrict != VINF_SUCCESS)
10890 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10891 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10892 return rcStrict;
10893}
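/*
 * Hedged usage sketch (not part of the original source): a hypothetical caller
 * could single-step a guest by invoking IEMExecOne in a loop and bailing out
 * on the first non-VINF_SUCCESS strict status.  The helper name and the step
 * count parameter are made up for illustration only.
 */
#if 0
static VBOXSTRICTRC emR3ExampleSingleStep(PVMCPU pVCpu, uint32_t cSteps)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cSteps-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);   /* decode + execute exactly one instruction */
        if (rcStrict != VINF_SUCCESS)   /* informational/error status: let the caller deal with it */
            break;
    }
    return rcStrict;
}
#endif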
10894
10895
10896VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10897{
10898 PIEMCPU pIemCpu = &pVCpu->iem.s;
10899 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10900 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10901
10902 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10903 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10904 if (rcStrict == VINF_SUCCESS)
10905 {
10906 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10907 if (pcbWritten)
10908 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10909 }
10910
10911#ifdef IN_RC
10912 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10913#endif
10914 return rcStrict;
10915}
10916
10917
10918VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10919 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10920{
10921 PIEMCPU pIemCpu = &pVCpu->iem.s;
10922 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10923 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10924
10925 VBOXSTRICTRC rcStrict;
10926 if ( cbOpcodeBytes
10927 && pCtx->rip == OpcodeBytesPC)
10928 {
10929 iemInitDecoder(pIemCpu, false);
10930 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10931 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10932 rcStrict = VINF_SUCCESS;
10933 }
10934 else
10935 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10936 if (rcStrict == VINF_SUCCESS)
10937 {
10938 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10939 }
10940
10941#ifdef IN_RC
10942 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10943#endif
10944 return rcStrict;
10945}
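/*
 * Hedged usage sketch (not part of the original source): a hypothetical caller
 * that already fetched the opcode bytes at CS:RIP (e.g. while handling a
 * shadow-paging \#PF) could hand them in and spare IEM the prefetch.  pCtx,
 * GCPtrRip, abInstr and cbFetched below are illustrative names only.
 */
#if 0
VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx),
                                                     GCPtrRip /* must equal pCtx->rip */,
                                                     abInstr, cbFetched);
#endif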
10946
10947
10948VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10949{
10950 PIEMCPU pIemCpu = &pVCpu->iem.s;
10951 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10952 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10953
10954 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10955 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10956 if (rcStrict == VINF_SUCCESS)
10957 {
10958 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10959 if (pcbWritten)
10960 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10961 }
10962
10963#ifdef IN_RC
10964 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10965#endif
10966 return rcStrict;
10967}
10968
10969
10970VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10971 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10972{
10973 PIEMCPU pIemCpu = &pVCpu->iem.s;
10974 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10975 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10976
10977 VBOXSTRICTRC rcStrict;
10978 if ( cbOpcodeBytes
10979 && pCtx->rip == OpcodeBytesPC)
10980 {
10981 iemInitDecoder(pIemCpu, true);
10982 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10983 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10984 rcStrict = VINF_SUCCESS;
10985 }
10986 else
10987 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10988 if (rcStrict == VINF_SUCCESS)
10989 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10990
10991#ifdef IN_RC
10992 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10993#endif
10994 return rcStrict;
10995}
10996
10997
10998VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
10999{
11000 PIEMCPU pIemCpu = &pVCpu->iem.s;
11001
11002 /*
11003 * See if there is an interrupt pending in TRPM and inject it if we can.
11004 */
11005#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11006 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11007# ifdef IEM_VERIFICATION_MODE_FULL
11008 pIemCpu->uInjectCpl = UINT8_MAX;
11009# endif
11010 if ( pCtx->eflags.Bits.u1IF
11011 && TRPMHasTrap(pVCpu)
11012 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11013 {
11014 uint8_t u8TrapNo;
11015 TRPMEVENT enmType;
11016 RTGCUINT uErrCode;
11017 RTGCPTR uCr2;
11018 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11019 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11020 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11021 TRPMResetTrap(pVCpu);
11022 }
11023#else
11024 iemExecVerificationModeSetup(pIemCpu);
11025 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11026#endif
11027
11028 /*
11029 * Log the state.
11030 */
11031#ifdef LOG_ENABLED
11032 iemLogCurInstr(pVCpu, pCtx, true);
11033#endif
11034
11035 /*
11036 * Do the decoding and emulation.
11037 */
11038 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11039 if (rcStrict == VINF_SUCCESS)
11040 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11041
11042#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11043 /*
11044 * Assert some sanity.
11045 */
11046 iemExecVerificationModeCheck(pIemCpu);
11047#endif
11048
11049 /*
11050 * Maybe re-enter raw-mode and log.
11051 */
11052#ifdef IN_RC
11053 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11054#endif
11055 if (rcStrict != VINF_SUCCESS)
11056 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11057 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11058 return rcStrict;
11059}
11060
11061
11062
11063/**
11064 * Injects a trap, fault, abort, software interrupt or external interrupt.
11065 *
11066 * The parameter list matches TRPMQueryTrapAll pretty closely.
11067 *
11068 * @returns Strict VBox status code.
11069 * @param pVCpu The current virtual CPU.
11070 * @param u8TrapNo The trap number.
11071 * @param enmType What type is it (trap/fault/abort), software
11072 * interrupt or hardware interrupt.
11073 * @param uErrCode The error code if applicable.
11074 * @param uCr2 The CR2 value if applicable.
11075 * @param cbInstr The instruction length (only relevant for
11076 * software interrupts).
11077 */
11078VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11079 uint8_t cbInstr)
11080{
11081 iemInitDecoder(&pVCpu->iem.s, false);
11082#ifdef DBGFTRACE_ENABLED
11083 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11084 u8TrapNo, enmType, uErrCode, uCr2);
11085#endif
11086
11087 uint32_t fFlags;
11088 switch (enmType)
11089 {
11090 case TRPM_HARDWARE_INT:
11091 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11092 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11093 uErrCode = uCr2 = 0;
11094 break;
11095
11096 case TRPM_SOFTWARE_INT:
11097 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11098 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11099 uErrCode = uCr2 = 0;
11100 break;
11101
11102 case TRPM_TRAP:
11103 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11104 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11105 if (u8TrapNo == X86_XCPT_PF)
11106 fFlags |= IEM_XCPT_FLAGS_CR2;
11107 switch (u8TrapNo)
11108 {
11109 case X86_XCPT_DF:
11110 case X86_XCPT_TS:
11111 case X86_XCPT_NP:
11112 case X86_XCPT_SS:
11113 case X86_XCPT_PF:
11114 case X86_XCPT_AC:
11115 fFlags |= IEM_XCPT_FLAGS_ERR;
11116 break;
11117
11118 case X86_XCPT_NMI:
11119 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11120 break;
11121 }
11122 break;
11123
11124 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11125 }
11126
11127 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11128}
11129
11130
11131/**
11132 * Injects the active TRPM event.
11133 *
11134 * @returns Strict VBox status code.
11135 * @param pVCpu Pointer to the VMCPU.
11136 */
11137VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11138{
11139#ifndef IEM_IMPLEMENTS_TASKSWITCH
11140 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11141#else
11142 uint8_t u8TrapNo;
11143 TRPMEVENT enmType;
11144 RTGCUINT uErrCode;
11145 RTGCUINTPTR uCr2;
11146 uint8_t cbInstr;
11147 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11148 if (RT_FAILURE(rc))
11149 return rc;
11150
11151 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11152
11153 /** @todo Are there any other codes that imply the event was successfully
11154 * delivered to the guest? See @bugref{6607}. */
11155 if ( rcStrict == VINF_SUCCESS
11156 || rcStrict == VINF_IEM_RAISED_XCPT)
11157 {
11158 TRPMResetTrap(pVCpu);
11159 }
11160 return rcStrict;
11161#endif
11162}
11163
11164
11165VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11166{
11167 return VERR_NOT_IMPLEMENTED;
11168}
11169
11170
11171VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11172{
11173 return VERR_NOT_IMPLEMENTED;
11174}
11175
11176
11177#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11178/**
11179 * Executes an IRET instruction with default operand size.
11180 *
11181 * This is for PATM.
11182 *
11183 * @returns VBox status code.
11184 * @param pVCpu The current virtual CPU.
11185 * @param pCtxCore The register frame.
11186 */
11187VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11188{
11189 PIEMCPU pIemCpu = &pVCpu->iem.s;
11190 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11191
11192 iemCtxCoreToCtx(pCtx, pCtxCore);
11193 iemInitDecoder(pIemCpu);
11194 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11195 if (rcStrict == VINF_SUCCESS)
11196 iemCtxToCtxCore(pCtxCore, pCtx);
11197 else
11198 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11199 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11200 return rcStrict;
11201}
11202#endif
11203
11204
11205/**
11206 * Macro used by the IEMExec* method to check the given instruction length.
11207 *
11208 * Will return on failure!
11209 *
11210 * @param a_cbInstr The given instruction length.
11211 * @param a_cbMin The minimum length.
11212 */
11213#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11214 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11215 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
11216
11217
11218/**
11219 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11220 *
11221 * This API ASSUMES that the caller has already verified that the guest code is
11222 * allowed to access the I/O port. (The I/O port is in the DX register in the
11223 * guest state.)
11224 *
11225 * @returns Strict VBox status code.
11226 * @param pVCpu The cross context per virtual CPU structure.
11227 * @param cbValue The size of the I/O port access (1, 2, or 4).
11228 * @param enmAddrMode The addressing mode.
11229 * @param fRepPrefix Indicates whether a repeat prefix is used
11230 * (doesn't matter which for this instruction).
11231 * @param cbInstr The instruction length in bytes.
11232 * @param iEffSeg The effective segment register number.
11233 */
11234VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11235 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11236{
11237 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11238 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11239
11240 /*
11241 * State init.
11242 */
11243 PIEMCPU pIemCpu = &pVCpu->iem.s;
11244 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11245
11246 /*
11247 * Switch orgy for getting to the right handler.
11248 */
11249 VBOXSTRICTRC rcStrict;
11250 if (fRepPrefix)
11251 {
11252 switch (enmAddrMode)
11253 {
11254 case IEMMODE_16BIT:
11255 switch (cbValue)
11256 {
11257 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11258 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11259 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11260 default:
11261 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11262 }
11263 break;
11264
11265 case IEMMODE_32BIT:
11266 switch (cbValue)
11267 {
11268 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11269 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11270 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11271 default:
11272 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11273 }
11274 break;
11275
11276 case IEMMODE_64BIT:
11277 switch (cbValue)
11278 {
11279 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11280 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11281 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11282 default:
11283 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11284 }
11285 break;
11286
11287 default:
11288 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11289 }
11290 }
11291 else
11292 {
11293 switch (enmAddrMode)
11294 {
11295 case IEMMODE_16BIT:
11296 switch (cbValue)
11297 {
11298 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11299 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11300 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11301 default:
11302 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11303 }
11304 break;
11305
11306 case IEMMODE_32BIT:
11307 switch (cbValue)
11308 {
11309 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11310 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11311 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11312 default:
11313 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11314 }
11315 break;
11316
11317 case IEMMODE_64BIT:
11318 switch (cbValue)
11319 {
11320 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11321 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11322 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11323 default:
11324 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11325 }
11326 break;
11327
11328 default:
11329 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11330 }
11331 }
11332
11333 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11334}
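/*
 * Hedged usage sketch (not part of the original source): a hypothetical HM
 * exit handler for a 'rep outsb' intercept might look like this, assuming the
 * instruction length and the effective segment have already been decoded from
 * the hardware-provided exit information.
 */
#if 0
VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
                                             1,              /* cbValue: byte-sized OUTSB */
                                             IEMMODE_32BIT,  /* enmAddrMode */
                                             true,           /* fRepPrefix */
                                             cbInstr,        /* from the exit info */
                                             X86_SREG_DS);   /* iEffSeg: default segment */
#endif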
11335
11336
11337/**
11338 * Interface for HM and EM for executing string I/O IN (read) instructions.
11339 *
11340 * This API ASSUMES that the caller has already verified that the guest code is
11341 * allowed to access the I/O port. (The I/O port is in the DX register in the
11342 * guest state.)
11343 *
11344 * @returns Strict VBox status code.
11345 * @param pVCpu The cross context per virtual CPU structure.
11346 * @param cbValue The size of the I/O port access (1, 2, or 4).
11347 * @param enmAddrMode The addressing mode.
11348 * @param fRepPrefix Indicates whether a repeat prefix is used
11349 * (doesn't matter which for this instruction).
11350 * @param cbInstr The instruction length in bytes.
11351 */
11352VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11353 bool fRepPrefix, uint8_t cbInstr)
11354{
11355 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11356
11357 /*
11358 * State init.
11359 */
11360 PIEMCPU pIemCpu = &pVCpu->iem.s;
11361 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11362
11363 /*
11364 * Switch orgy for getting to the right handler.
11365 */
11366 VBOXSTRICTRC rcStrict;
11367 if (fRepPrefix)
11368 {
11369 switch (enmAddrMode)
11370 {
11371 case IEMMODE_16BIT:
11372 switch (cbValue)
11373 {
11374 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11375 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11376 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11377 default:
11378 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11379 }
11380 break;
11381
11382 case IEMMODE_32BIT:
11383 switch (cbValue)
11384 {
11385 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11386 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11387 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11388 default:
11389 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11390 }
11391 break;
11392
11393 case IEMMODE_64BIT:
11394 switch (cbValue)
11395 {
11396 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11397 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11398 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11399 default:
11400 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11401 }
11402 break;
11403
11404 default:
11405 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11406 }
11407 }
11408 else
11409 {
11410 switch (enmAddrMode)
11411 {
11412 case IEMMODE_16BIT:
11413 switch (cbValue)
11414 {
11415 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11416 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11417 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11418 default:
11419 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11420 }
11421 break;
11422
11423 case IEMMODE_32BIT:
11424 switch (cbValue)
11425 {
11426 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11427 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11428 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11429 default:
11430 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11431 }
11432 break;
11433
11434 case IEMMODE_64BIT:
11435 switch (cbValue)
11436 {
11437 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11438 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11439 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11440 default:
11441 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11442 }
11443 break;
11444
11445 default:
11446 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11447 }
11448 }
11449
11450 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11451}
11452
11453
11454
11455/**
11456 * Interface for HM and EM to write to a CRx register.
11457 *
11458 * @returns Strict VBox status code.
11459 * @param pVCpu The cross context per virtual CPU structure.
11460 * @param cbInstr The instruction length in bytes.
11461 * @param iCrReg The control register number (destination).
11462 * @param iGReg The general purpose register number (source).
11463 *
11464 * @remarks In ring-0 not all of the state needs to be synced in.
11465 */
11466VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11467{
11468 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11469 Assert(iCrReg < 16);
11470 Assert(iGReg < 16);
11471
11472 PIEMCPU pIemCpu = &pVCpu->iem.s;
11473 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11474 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11475 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11476}
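/*
 * Hedged usage sketch (not part of the original source): a hypothetical HM
 * handler for a 'mov cr3, rax' intercept could hand the pre-decoded operands
 * straight to this interface; cbInstr is assumed to come from the exit info.
 */
#if 0
VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr,
                                                  3 /* iCrReg: CR3 */,
                                                  X86_GREG_xAX /* iGReg */);
#endif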
11477
11478
11479/**
11480 * Interface for HM and EM to read from a CRx register.
11481 *
11482 * @returns Strict VBox status code.
11483 * @param pVCpu The cross context per virtual CPU structure.
11484 * @param cbInstr The instruction length in bytes.
11485 * @param iGReg The general purpose register number (destination).
11486 * @param iCrReg The control register number (source).
11487 *
11488 * @remarks In ring-0 not all of the state needs to be synced in.
11489 */
11490VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11491{
11492 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11493 Assert(iCrReg < 16);
11494 Assert(iGReg < 16);
11495
11496 PIEMCPU pIemCpu = &pVCpu->iem.s;
11497 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11498 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11499 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11500}
11501
11502
11503/**
11504 * Interface for HM and EM to clear the CR0[TS] bit.
11505 *
11506 * @returns Strict VBox status code.
11507 * @param pVCpu The cross context per virtual CPU structure.
11508 * @param cbInstr The instruction length in bytes.
11509 *
11510 * @remarks In ring-0 not all of the state needs to be synced in.
11511 */
11512VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11513{
11514 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11515
11516 PIEMCPU pIemCpu = &pVCpu->iem.s;
11517 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11518 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11519 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11520}
11521
11522
11523/**
11524 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11525 *
11526 * @returns Strict VBox status code.
11527 * @param pVCpu The cross context per virtual CPU structure.
11528 * @param cbInstr The instruction length in bytes.
11529 * @param uValue The value to load into CR0.
11530 *
11531 * @remarks In ring-0 not all of the state needs to be synced in.
11532 */
11533VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11534{
11535 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11536
11537 PIEMCPU pIemCpu = &pVCpu->iem.s;
11538 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11539 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11540 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11541}
11542
11543
11544/**
11545 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11546 *
11547 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11548 *
11549 * @returns Strict VBox status code.
11550 * @param pVCpu The cross context per virtual CPU structure of the
11551 * calling EMT.
11552 * @param cbInstr The instruction length in bytes.
11553 * @remarks In ring-0 not all of the state needs to be synced in.
11554 * @threads EMT(pVCpu)
11555 */
11556VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11557{
11558 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11559
11560 PIEMCPU pIemCpu = &pVCpu->iem.s;
11561 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11562 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11563 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11564}
11565
11566#ifdef IN_RING3
11567
11568/**
11569 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11570 *
11571 * @returns Merge between @a rcStrict and what the commit operation returned.
11572 * @param pVCpu Pointer to the cross context CPU structure for the
11573 * calling EMT.
11574 * @param rcStrict The status code returned by ring-0 or raw-mode.
11575 */
11576VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11577{
11578 PIEMCPU pIemCpu = &pVCpu->iem.s;
11579
11580 /*
11581 * Retrieve and reset the pending commit.
11582 */
11583 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11584 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11585 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11586
11587 /*
11588 * Must reset pass-up status code.
11589 */
11590 pIemCpu->rcPassUp = VINF_SUCCESS;
11591
11592 /*
11593 * Call the function. Currently using switch here instead of function
11594 * pointer table as a switch won't get skewed.
11595 */
11596 VBOXSTRICTRC rcStrictCommit;
11597 switch (enmFn)
11598 {
11599 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11600 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11601 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11602 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11603 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11604 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11605 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11606 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11607 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11608 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11609 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11610 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11611 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11612 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11613 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11614 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11615 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11616 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11617 default:
11618 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11619 }
11620
11621 /*
11622 * Merge status code (if any) with the incoming one.
11623 */
11624 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11625 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11626 return rcStrict;
11627 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11628 return rcStrictCommit;
11629
11630 /* Complicated. */
11631 if (RT_FAILURE(rcStrict))
11632 return rcStrict;
11633 if (RT_FAILURE(rcStrictCommit))
11634 return rcStrictCommit;
11635 if ( rcStrict >= VINF_EM_FIRST
11636 && rcStrict <= VINF_EM_LAST)
11637 {
11638 if ( rcStrictCommit >= VINF_EM_FIRST
11639 && rcStrictCommit <= VINF_EM_LAST)
11640 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11641
11642 /* This really shouldn't happen. Check PGM + handler code! */
11643 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11644 }
11645 /* This shouldn't really happen either, see IOM_SUCCESS. */
11646 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11647}
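/*
 * Illustrative note (not part of the original source): when both the incoming
 * status and the commit status are informational EM statuses, the merge above
 * returns the numerically lower of the two, since lower VINF_EM_* values carry
 * higher scheduling priority.  Failure statuses and VINF_EM_RAW_TO_R3 from the
 * caller are handled before that comparison is reached.
 */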
11648
11649#endif /* IN_RING3 */
11650