VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@56061

Last change on this file since 56061 was 56061, checked in by vboxsync, 10 years ago

IEMAll.cpp: return code adjusted assertion.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 436.5 KB
1/* $Id: IEMAll.cpp 56061 2015-05-25 15:44:38Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78#define IEM_IMPLEMENTS_TASKSWITCH
79
80/*******************************************************************************
81* Header Files *
82*******************************************************************************/
83#define LOG_GROUP LOG_GROUP_IEM
84#include <VBox/vmm/iem.h>
85#include <VBox/vmm/cpum.h>
86#include <VBox/vmm/pdm.h>
87#include <VBox/vmm/pgm.h>
88#include <internal/pgm.h>
89#include <VBox/vmm/iom.h>
90#include <VBox/vmm/em.h>
91#include <VBox/vmm/hm.h>
92#include <VBox/vmm/tm.h>
93#include <VBox/vmm/dbgf.h>
94#include <VBox/vmm/dbgftrace.h>
95#ifdef VBOX_WITH_RAW_MODE_NOT_R0
96# include <VBox/vmm/patm.h>
97# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
98# include <VBox/vmm/csam.h>
99# endif
100#endif
101#include "IEMInternal.h"
102#ifdef IEM_VERIFICATION_MODE_FULL
103# include <VBox/vmm/rem.h>
104# include <VBox/vmm/mm.h>
105#endif
106#include <VBox/vmm/vm.h>
107#include <VBox/log.h>
108#include <VBox/err.h>
109#include <VBox/param.h>
110#include <VBox/dis.h>
111#include <VBox/disopcode.h>
112#include <iprt/assert.h>
113#include <iprt/string.h>
114#include <iprt/x86.h>
115
116
117
118/*******************************************************************************
119* Structures and Typedefs *
120*******************************************************************************/
121/** @typedef PFNIEMOP
122 * Pointer to an opcode decoder function.
123 */
124
125/** @def FNIEMOP_DEF
126 * Define an opcode decoder function.
127 *
128 * We're using macros for this so that adding and removing parameters as well as
129 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
130 *
131 * @param a_Name The function name.
132 */
133
134
135#if defined(__GNUC__) && defined(RT_ARCH_X86)
136typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
137# define FNIEMOP_DEF(a_Name) \
138 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
139# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
140 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
141# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
142 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
143
144#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
145typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
146# define FNIEMOP_DEF(a_Name) \
147 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
148# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
149 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
150# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
151 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
152
153#elif defined(__GNUC__)
154typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#else
163typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
170
171#endif
172
173
174/**
175 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
176 */
177typedef union IEMSELDESC
178{
179 /** The legacy view. */
180 X86DESC Legacy;
181 /** The long mode view. */
182 X86DESC64 Long;
183} IEMSELDESC;
184/** Pointer to a selector descriptor table entry. */
185typedef IEMSELDESC *PIEMSELDESC;
186
187
188/*******************************************************************************
189* Defined Constants And Macros *
190*******************************************************************************/
191/** Temporary hack to disable the double execution. Will be removed in favor
192 * of a dedicated execution mode in EM. */
193//#define IEM_VERIFICATION_MODE_NO_REM
194
195/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
196 * due to GCC lacking knowledge about the value range of a switch. */
197#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
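/* Illustrative usage sketch (not from the original file; the variable and the
 * enclosing function are made up, though IEMMODE and enmEffAddrMode are real):
 * inside a function returning a VBox status code, the macro supplies the
 * unreachable default case so GCC stops warning that cbAddr may be used
 * uninitialized:
 *
 *      uint32_t cbAddr;
 *      switch (pIemCpu->enmEffAddrMode)
 *      {
 *          case IEMMODE_16BIT: cbAddr = 2; break;
 *          case IEMMODE_32BIT: cbAddr = 4; break;
 *          case IEMMODE_64BIT: cbAddr = 8; break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 */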
198
199/**
200 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
201 * occasion.
202 */
203#ifdef LOG_ENABLED
204# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
205 do { \
206 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
207 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
208 } while (0)
209#else
210# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
212#endif
213
214/**
215 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
216 * occasion using the supplied logger statement.
217 *
218 * @param a_LoggerArgs What to log on failure.
219 */
220#ifdef LOG_ENABLED
221# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
222 do { \
223 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
224 /*LogFunc(a_LoggerArgs);*/ \
225 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
226 } while (0)
227#else
228# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
230#endif
231
232/**
233 * Call an opcode decoder function.
234 *
235 * We're using macros for this so that adding and removing parameters can be
236 * done as we please. See FNIEMOP_DEF.
237 */
238#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
239
240/**
241 * Call a common opcode decoder function taking one extra argument.
242 *
243 * We're using macros for this so that adding and removing parameters can be
244 * done as we please. See FNIEMOP_DEF_1.
245 */
246#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
247
248/**
249 * Call a common opcode decoder function taking two extra arguments.
250 *
251 * We're using macros for this so that adding and removing parameters can be
252 * done as we please. See FNIEMOP_DEF_2.
253 */
254#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
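/* Hedged sketch of how the FNIEMOP_DEF / FNIEMOP_CALL pair fits together (the
 * opcode function name below is hypothetical; FNIEMOP_DEF, FNIEMOP_CALL,
 * IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG and g_apfnOneByteMap are the real ones
 * from this file): a decoder function is defined with FNIEMOP_DEF so it gets
 * the PFNIEMOP calling convention, and the dispatcher invokes it through
 * FNIEMOP_CALL with pIemCpu passed implicitly.
 *
 *      FNIEMOP_DEF(iemOp_ExampleNotImplemented)
 *      {
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example opcode\n"));
 *      }
 *
 *      // ...somewhere in the dispatcher, after fetching opcode byte b:
 *      return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */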
255
256/**
257 * Check if we're currently executing in real or virtual 8086 mode.
258 *
259 * @returns @c true if it is, @c false if not.
260 * @param a_pIemCpu The IEM state of the current CPU.
261 */
262#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
263
264/**
265 * Check if we're currently executing in virtual 8086 mode.
266 *
267 * @returns @c true if it is, @c false if not.
268 * @param a_pIemCpu The IEM state of the current CPU.
269 */
270#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
271
272/**
273 * Check if we're currently executing in long mode.
274 *
275 * @returns @c true if it is, @c false if not.
276 * @param a_pIemCpu The IEM state of the current CPU.
277 */
278#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
279
280/**
281 * Check if we're currently executing in real mode.
282 *
283 * @returns @c true if it is, @c false if not.
284 * @param a_pIemCpu The IEM state of the current CPU.
285 */
286#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
287
288/**
289 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
290 * @returns PCCPUMFEATURES
291 * @param a_pIemCpu The IEM state of the current CPU.
292 */
293#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
294
295/**
296 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
297 * @returns PCCPUMFEATURES
298 * @param a_pIemCpu The IEM state of the current CPU.
299 */
300#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
301
302/**
303 * Evaluates to true if we're presenting an Intel CPU to the guest.
304 */
305#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
306
307/**
308 * Evaluates to true if we're presenting an AMD CPU to the guest.
309 */
310#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
311
312/**
313 * Check if the address is canonical.
314 */
315#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
316
317
318/*******************************************************************************
319* Global Variables *
320*******************************************************************************/
321extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
322
323
324/** Function table for the ADD instruction. */
325IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
326{
327 iemAImpl_add_u8, iemAImpl_add_u8_locked,
328 iemAImpl_add_u16, iemAImpl_add_u16_locked,
329 iemAImpl_add_u32, iemAImpl_add_u32_locked,
330 iemAImpl_add_u64, iemAImpl_add_u64_locked
331};
332
333/** Function table for the ADC instruction. */
334IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
335{
336 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
337 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
338 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
339 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
340};
341
342/** Function table for the SUB instruction. */
343IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
344{
345 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
346 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
347 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
348 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
349};
350
351/** Function table for the SBB instruction. */
352IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
353{
354 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
355 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
356 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
357 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
358};
359
360/** Function table for the OR instruction. */
361IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
362{
363 iemAImpl_or_u8, iemAImpl_or_u8_locked,
364 iemAImpl_or_u16, iemAImpl_or_u16_locked,
365 iemAImpl_or_u32, iemAImpl_or_u32_locked,
366 iemAImpl_or_u64, iemAImpl_or_u64_locked
367};
368
369/** Function table for the XOR instruction. */
370IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
371{
372 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
373 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
374 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
375 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
376};
377
378/** Function table for the AND instruction. */
379IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
380{
381 iemAImpl_and_u8, iemAImpl_and_u8_locked,
382 iemAImpl_and_u16, iemAImpl_and_u16_locked,
383 iemAImpl_and_u32, iemAImpl_and_u32_locked,
384 iemAImpl_and_u64, iemAImpl_and_u64_locked
385};
386
387/** Function table for the CMP instruction.
388 * @remarks Making operand order ASSUMPTIONS.
389 */
390IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
391{
392 iemAImpl_cmp_u8, NULL,
393 iemAImpl_cmp_u16, NULL,
394 iemAImpl_cmp_u32, NULL,
395 iemAImpl_cmp_u64, NULL
396};
397
398/** Function table for the TEST instruction.
399 * @remarks Making operand order ASSUMPTIONS.
400 */
401IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
402{
403 iemAImpl_test_u8, NULL,
404 iemAImpl_test_u16, NULL,
405 iemAImpl_test_u32, NULL,
406 iemAImpl_test_u64, NULL
407};
408
409/** Function table for the BT instruction. */
410IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
411{
412 NULL, NULL,
413 iemAImpl_bt_u16, NULL,
414 iemAImpl_bt_u32, NULL,
415 iemAImpl_bt_u64, NULL
416};
417
418/** Function table for the BTC instruction. */
419IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
420{
421 NULL, NULL,
422 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
423 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
424 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
425};
426
427/** Function table for the BTR instruction. */
428IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
429{
430 NULL, NULL,
431 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
432 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
433 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
434};
435
436/** Function table for the BTS instruction. */
437IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
438{
439 NULL, NULL,
440 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
441 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
442 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
443};
444
445/** Function table for the BSF instruction. */
446IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
447{
448 NULL, NULL,
449 iemAImpl_bsf_u16, NULL,
450 iemAImpl_bsf_u32, NULL,
451 iemAImpl_bsf_u64, NULL
452};
453
454/** Function table for the BSR instruction. */
455IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
456{
457 NULL, NULL,
458 iemAImpl_bsr_u16, NULL,
459 iemAImpl_bsr_u32, NULL,
460 iemAImpl_bsr_u64, NULL
461};
462
463/** Function table for the IMUL instruction. */
464IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
465{
466 NULL, NULL,
467 iemAImpl_imul_two_u16, NULL,
468 iemAImpl_imul_two_u32, NULL,
469 iemAImpl_imul_two_u64, NULL
470};
471
472/** Group 1 /r lookup table. */
473IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
474{
475 &g_iemAImpl_add,
476 &g_iemAImpl_or,
477 &g_iemAImpl_adc,
478 &g_iemAImpl_sbb,
479 &g_iemAImpl_and,
480 &g_iemAImpl_sub,
481 &g_iemAImpl_xor,
482 &g_iemAImpl_cmp
483};
484
485/** Function table for the INC instruction. */
486IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
487{
488 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
489 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
490 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
491 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
492};
493
494/** Function table for the DEC instruction. */
495IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
496{
497 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
498 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
499 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
500 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
501};
502
503/** Function table for the NEG instruction. */
504IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
505{
506 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
507 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
508 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
509 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
510};
511
512/** Function table for the NOT instruction. */
513IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
514{
515 iemAImpl_not_u8, iemAImpl_not_u8_locked,
516 iemAImpl_not_u16, iemAImpl_not_u16_locked,
517 iemAImpl_not_u32, iemAImpl_not_u32_locked,
518 iemAImpl_not_u64, iemAImpl_not_u64_locked
519};
520
521
522/** Function table for the ROL instruction. */
523IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
524{
525 iemAImpl_rol_u8,
526 iemAImpl_rol_u16,
527 iemAImpl_rol_u32,
528 iemAImpl_rol_u64
529};
530
531/** Function table for the ROR instruction. */
532IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
533{
534 iemAImpl_ror_u8,
535 iemAImpl_ror_u16,
536 iemAImpl_ror_u32,
537 iemAImpl_ror_u64
538};
539
540/** Function table for the RCL instruction. */
541IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
542{
543 iemAImpl_rcl_u8,
544 iemAImpl_rcl_u16,
545 iemAImpl_rcl_u32,
546 iemAImpl_rcl_u64
547};
548
549/** Function table for the RCR instruction. */
550IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
551{
552 iemAImpl_rcr_u8,
553 iemAImpl_rcr_u16,
554 iemAImpl_rcr_u32,
555 iemAImpl_rcr_u64
556};
557
558/** Function table for the SHL instruction. */
559IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
560{
561 iemAImpl_shl_u8,
562 iemAImpl_shl_u16,
563 iemAImpl_shl_u32,
564 iemAImpl_shl_u64
565};
566
567/** Function table for the SHR instruction. */
568IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
569{
570 iemAImpl_shr_u8,
571 iemAImpl_shr_u16,
572 iemAImpl_shr_u32,
573 iemAImpl_shr_u64
574};
575
576/** Function table for the SAR instruction. */
577IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
578{
579 iemAImpl_sar_u8,
580 iemAImpl_sar_u16,
581 iemAImpl_sar_u32,
582 iemAImpl_sar_u64
583};
584
585
586/** Function table for the MUL instruction. */
587IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
588{
589 iemAImpl_mul_u8,
590 iemAImpl_mul_u16,
591 iemAImpl_mul_u32,
592 iemAImpl_mul_u64
593};
594
595/** Function table for the IMUL instruction working implicitly on rAX. */
596IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
597{
598 iemAImpl_imul_u8,
599 iemAImpl_imul_u16,
600 iemAImpl_imul_u32,
601 iemAImpl_imul_u64
602};
603
604/** Function table for the DIV instruction. */
605IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
606{
607 iemAImpl_div_u8,
608 iemAImpl_div_u16,
609 iemAImpl_div_u32,
610 iemAImpl_div_u64
611};
612
613/** Function table for the IDIV instruction. */
614IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
615{
616 iemAImpl_idiv_u8,
617 iemAImpl_idiv_u16,
618 iemAImpl_idiv_u32,
619 iemAImpl_idiv_u64
620};
621
622/** Function table for the SHLD instruction */
623IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
624{
625 iemAImpl_shld_u16,
626 iemAImpl_shld_u32,
627 iemAImpl_shld_u64,
628};
629
630/** Function table for the SHRD instruction */
631IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
632{
633 iemAImpl_shrd_u16,
634 iemAImpl_shrd_u32,
635 iemAImpl_shrd_u64,
636};
637
638
639/** Function table for the PUNPCKLBW instruction */
640IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
642/** Function table for the PUNPCKLWD instruction */
642IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
643/** Function table for the PUNPCKLDQ instruction */
644IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
645/** Function table for the PUNPCKLQDQ instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
647
648/** Function table for the PUNPCKHBW instruction */
649IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
650/** Function table for the PUNPCKHWD instruction */
651IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
652/** Function table for the PUNPCKHDQ instruction */
653IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
654/** Function table for the PUNPCKHQDQ instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
656
657/** Function table for the PXOR instruction */
658IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
659/** Function table for the PCMPEQB instruction */
660IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
661/** Function table for the PCMPEQW instruction */
662IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
663/** Function table for the PCMPEQD instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
665
666
667#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
668/** What IEM just wrote. */
669uint8_t g_abIemWrote[256];
670/** How much IEM just wrote. */
671size_t g_cbIemWrote;
672#endif
673
674
675/*******************************************************************************
676* Internal Functions *
677*******************************************************************************/
678IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
679IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
680IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
681IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
682/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
683IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
684IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
685IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
687IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
688IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
689IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
691IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
692IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
693IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
694IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
695IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
696IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
698IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
699IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
700IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
701IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
705IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
706IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
707IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
708IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
709IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
710IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
711
712#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
713IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
714#endif
715IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
716IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
717
718
719
720/**
721 * Sets the pass up status.
722 *
723 * @returns VINF_SUCCESS.
724 * @param pIemCpu The per CPU IEM state of the calling thread.
725 * @param rcPassUp The pass up status. Must be informational.
726 * VINF_SUCCESS is not allowed.
727 */
728IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
729{
730 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
731
732 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
733 if (rcOldPassUp == VINF_SUCCESS)
734 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
735 /* If both are EM scheduling codes, use EM priority rules. */
736 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
737 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
738 {
739 if (rcPassUp < rcOldPassUp)
740 {
741 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
742 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
743 }
744 else
745 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 }
747 /* Override EM scheduling with specific status code. */
748 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
749 {
750 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
751 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
752 }
753 /* Don't override specific status code, first come first served. */
754 else
755 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
756 return VINF_SUCCESS;
757}
758
759
760/**
761 * Initializes the execution state.
762 *
763 * @param pIemCpu The per CPU IEM state.
764 * @param fBypassHandlers Whether to bypass access handlers.
765 */
766DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
767{
768 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
769 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
770
771#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
772 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
773 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
774 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
775 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
776 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
778 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
780#endif
781
782#ifdef VBOX_WITH_RAW_MODE_NOT_R0
783 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
784#endif
785 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
786 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
787 ? IEMMODE_64BIT
788 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
789 ? IEMMODE_32BIT
790 : IEMMODE_16BIT;
791 pIemCpu->enmCpuMode = enmMode;
792#ifdef VBOX_STRICT
793 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
794 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
795 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
796 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
797 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
798 pIemCpu->uRexReg = 127;
799 pIemCpu->uRexB = 127;
800 pIemCpu->uRexIndex = 127;
801 pIemCpu->iEffSeg = 127;
802 pIemCpu->offOpcode = 127;
803 pIemCpu->cbOpcode = 127;
804#endif
805
806 pIemCpu->cActiveMappings = 0;
807 pIemCpu->iNextMapping = 0;
808 pIemCpu->rcPassUp = VINF_SUCCESS;
809 pIemCpu->fBypassHandlers = fBypassHandlers;
810#ifdef VBOX_WITH_RAW_MODE_NOT_R0
811 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
812 && pCtx->cs.u64Base == 0
813 && pCtx->cs.u32Limit == UINT32_MAX
814 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
815 if (!pIemCpu->fInPatchCode)
816 CPUMRawLeave(pVCpu, VINF_SUCCESS);
817#endif
818}
819
820
821/**
822 * Initializes the decoder state.
823 *
824 * @param pIemCpu The per CPU IEM state.
825 * @param fBypassHandlers Whether to bypass access handlers.
826 */
827DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
828{
829 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
830 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
831
832#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
833 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
834 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
835 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
836 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
837 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
839 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
840 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
841#endif
842
843#ifdef VBOX_WITH_RAW_MODE_NOT_R0
844 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
845#endif
846 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
847#ifdef IEM_VERIFICATION_MODE_FULL
848 if (pIemCpu->uInjectCpl != UINT8_MAX)
849 pIemCpu->uCpl = pIemCpu->uInjectCpl;
850#endif
851 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
852 ? IEMMODE_64BIT
853 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
854 ? IEMMODE_32BIT
855 : IEMMODE_16BIT;
856 pIemCpu->enmCpuMode = enmMode;
857 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
858 pIemCpu->enmEffAddrMode = enmMode;
859 if (enmMode != IEMMODE_64BIT)
860 {
861 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
862 pIemCpu->enmEffOpSize = enmMode;
863 }
864 else
865 {
866 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
867 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
868 }
869 pIemCpu->fPrefixes = 0;
870 pIemCpu->uRexReg = 0;
871 pIemCpu->uRexB = 0;
872 pIemCpu->uRexIndex = 0;
873 pIemCpu->iEffSeg = X86_SREG_DS;
874 pIemCpu->offOpcode = 0;
875 pIemCpu->cbOpcode = 0;
876 pIemCpu->cActiveMappings = 0;
877 pIemCpu->iNextMapping = 0;
878 pIemCpu->rcPassUp = VINF_SUCCESS;
879 pIemCpu->fBypassHandlers = fBypassHandlers;
880#ifdef VBOX_WITH_RAW_MODE_NOT_R0
881 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
882 && pCtx->cs.u64Base == 0
883 && pCtx->cs.u32Limit == UINT32_MAX
884 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
885 if (!pIemCpu->fInPatchCode)
886 CPUMRawLeave(pVCpu, VINF_SUCCESS);
887#endif
888
889#ifdef DBGFTRACE_ENABLED
890 switch (enmMode)
891 {
892 case IEMMODE_64BIT:
893 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
894 break;
895 case IEMMODE_32BIT:
896 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
897 break;
898 case IEMMODE_16BIT:
899 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
900 break;
901 }
902#endif
903}
904
905
906/**
907 * Prefetches opcodes the first time execution is started.
908 *
909 * @returns Strict VBox status code.
910 * @param pIemCpu The IEM state.
911 * @param fBypassHandlers Whether to bypass access handlers.
912 */
913IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
914{
915#ifdef IEM_VERIFICATION_MODE_FULL
916 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
917#endif
918 iemInitDecoder(pIemCpu, fBypassHandlers);
919
920 /*
921 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
922 *
923 * First translate CS:rIP to a physical address.
924 */
925 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
926 uint32_t cbToTryRead;
927 RTGCPTR GCPtrPC;
928 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
929 {
930 cbToTryRead = PAGE_SIZE;
931 GCPtrPC = pCtx->rip;
932 if (!IEM_IS_CANONICAL(GCPtrPC))
933 return iemRaiseGeneralProtectionFault0(pIemCpu);
934 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
935 }
936 else
937 {
938 uint32_t GCPtrPC32 = pCtx->eip;
939 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
940 if (GCPtrPC32 > pCtx->cs.u32Limit)
941 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
942 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
943 if (!cbToTryRead) /* overflowed */
944 {
945 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
946 cbToTryRead = UINT32_MAX;
947 }
948 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
949 Assert(GCPtrPC <= UINT32_MAX);
950 }
951
952#ifdef VBOX_WITH_RAW_MODE_NOT_R0
953 /* Allow interpretation of patch manager code blocks since they can for
954 instance throw #PFs for perfectly good reasons. */
955 if (pIemCpu->fInPatchCode)
956 {
957 size_t cbRead = 0;
958 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
959 AssertRCReturn(rc, rc);
960 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
961 return VINF_SUCCESS;
962 }
963#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
964
965 RTGCPHYS GCPhys;
966 uint64_t fFlags;
967 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
968 if (RT_FAILURE(rc))
969 {
970 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
971 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
972 }
973 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
974 {
975 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
976 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
977 }
978 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
979 {
980 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
981 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
982 }
983 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
984 /** @todo Check reserved bits and such stuff. PGM is better at doing
985 * that, so do it when implementing the guest virtual address
986 * TLB... */
987
988#ifdef IEM_VERIFICATION_MODE_FULL
989 /*
990 * Optimistic optimization: Use unconsumed opcode bytes from the previous
991 * instruction.
992 */
993 /** @todo optimize this differently by not using PGMPhysRead. */
994 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
995 pIemCpu->GCPhysOpcodes = GCPhys;
996 if ( offPrevOpcodes < cbOldOpcodes
997 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
998 {
999 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1000 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1001 pIemCpu->cbOpcode = cbNew;
1002 return VINF_SUCCESS;
1003 }
1004#endif
1005
1006 /*
1007 * Read the bytes at this address.
1008 */
1009 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1010#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1011 size_t cbActual;
1012 if ( PATMIsEnabled(pVM)
1013 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1014 {
1015 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1016 Assert(cbActual > 0);
1017 pIemCpu->cbOpcode = (uint8_t)cbActual;
1018 }
1019 else
1020#endif
1021 {
1022 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1023 if (cbToTryRead > cbLeftOnPage)
1024 cbToTryRead = cbLeftOnPage;
1025 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1026 cbToTryRead = sizeof(pIemCpu->abOpcode);
1027
1028 if (!pIemCpu->fBypassHandlers)
1029 {
1030 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1031 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1032 { /* likely */ }
1033 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1034 {
1035 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1036 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1037 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1038 }
1039 else
1040 {
1041 Log((RT_SUCCESS(rcStrict)
1042 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1043 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1044 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1045 return rcStrict;
1046 }
1047 }
1048 else
1049 {
1050 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1051 if (RT_SUCCESS(rc))
1052 { /* likely */ }
1053 else
1054 {
1055 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1056 GCPtrPC, GCPhys, cbToTryRead, rc));
1057 return rc;
1058 }
1059 }
1060 pIemCpu->cbOpcode = cbToTryRead;
1061 }
1062
1063 return VINF_SUCCESS;
1064}
1065
1066
1067/**
1068 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1069 * exception if it fails.
1070 *
1071 * @returns Strict VBox status code.
1072 * @param pIemCpu The IEM state.
1073 * @param cbMin The minimum number of bytes relative to offOpcode
1074 * that must be read.
1075 */
1076IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1077{
1078 /*
1079 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1080 *
1081 * First translate CS:rIP to a physical address.
1082 */
1083 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1084 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1085 uint32_t cbToTryRead;
1086 RTGCPTR GCPtrNext;
1087 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1088 {
1089 cbToTryRead = PAGE_SIZE;
1090 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1091 if (!IEM_IS_CANONICAL(GCPtrNext))
1092 return iemRaiseGeneralProtectionFault0(pIemCpu);
1093 }
1094 else
1095 {
1096 uint32_t GCPtrNext32 = pCtx->eip;
1097 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1098 GCPtrNext32 += pIemCpu->cbOpcode;
1099 if (GCPtrNext32 > pCtx->cs.u32Limit)
1100 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1101 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1102 if (!cbToTryRead) /* overflowed */
1103 {
1104 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1105 cbToTryRead = UINT32_MAX;
1106 /** @todo check out wrapping around the code segment. */
1107 }
1108 if (cbToTryRead < cbMin - cbLeft)
1109 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1110 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1111 }
1112
1113 /* Only read up to the end of the page, and make sure we don't read more
1114 than the opcode buffer can hold. */
1115 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1116 if (cbToTryRead > cbLeftOnPage)
1117 cbToTryRead = cbLeftOnPage;
1118 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1119 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1120/** @todo r=bird: Convert assertion into undefined opcode exception? */
1121 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1122
1123#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1124 /* Allow interpretation of patch manager code blocks since they can for
1125 instance throw #PFs for perfectly good reasons. */
1126 if (pIemCpu->fInPatchCode)
1127 {
1128 size_t cbRead = 0;
1129 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1130 AssertRCReturn(rc, rc);
1131 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1132 return VINF_SUCCESS;
1133 }
1134#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1135
1136 RTGCPHYS GCPhys;
1137 uint64_t fFlags;
1138 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1139 if (RT_FAILURE(rc))
1140 {
1141 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1142 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1143 }
1144 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1145 {
1146 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1147 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1148 }
1149 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1150 {
1151 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1152 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1153 }
1154 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1155 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1156 /** @todo Check reserved bits and such stuff. PGM is better at doing
1157 * that, so do it when implementing the guest virtual address
1158 * TLB... */
1159
1160 /*
1161 * Read the bytes at this address.
1162 *
1163 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1164 * and since PATM should only patch the start of an instruction there
1165 * should be no need to check again here.
1166 */
1167 if (!pIemCpu->fBypassHandlers)
1168 {
1169 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1170 cbToTryRead, PGMACCESSORIGIN_IEM);
1171 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1172 { /* likely */ }
1173 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1174 {
1175 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1176 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1177 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1178 }
1179 else
1180 {
1181 Log((RT_SUCCESS(rcStrict)
1182 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1183 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1184 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1185 return rcStrict;
1186 }
1187 }
1188 else
1189 {
1190 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1191 if (RT_SUCCESS(rc))
1192 { /* likely */ }
1193 else
1194 {
1195 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1196 return rc;
1197 }
1198 }
1199 pIemCpu->cbOpcode += cbToTryRead;
1200 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1201
1202 return VINF_SUCCESS;
1203}
1204
1205
1206/**
1207 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1208 *
1209 * @returns Strict VBox status code.
1210 * @param pIemCpu The IEM state.
1211 * @param pb Where to return the opcode byte.
1212 */
1213DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1214{
1215 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1216 if (rcStrict == VINF_SUCCESS)
1217 {
1218 uint8_t offOpcode = pIemCpu->offOpcode;
1219 *pb = pIemCpu->abOpcode[offOpcode];
1220 pIemCpu->offOpcode = offOpcode + 1;
1221 }
1222 else
1223 *pb = 0;
1224 return rcStrict;
1225}
1226
1227
1228/**
1229 * Fetches the next opcode byte.
1230 *
1231 * @returns Strict VBox status code.
1232 * @param pIemCpu The IEM state.
1233 * @param pu8 Where to return the opcode byte.
1234 */
1235DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1236{
1237 uint8_t const offOpcode = pIemCpu->offOpcode;
1238 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1239 {
1240 *pu8 = pIemCpu->abOpcode[offOpcode];
1241 pIemCpu->offOpcode = offOpcode + 1;
1242 return VINF_SUCCESS;
1243 }
1244 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1245}
1246
1247
1248/**
1249 * Fetches the next opcode byte, returns automatically on failure.
1250 *
1251 * @param a_pu8 Where to return the opcode byte.
1252 * @remark Implicitly references pIemCpu.
1253 */
1254#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1255 do \
1256 { \
1257 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1258 if (rcStrict2 != VINF_SUCCESS) \
1259 return rcStrict2; \
1260 } while (0)
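/* Hedged usage sketch (the decoder function name is made up): within a
 * FNIEMOP_DEF function the macro both fetches the next opcode byte and bails
 * out with the strict status code if the fetch fails, so the decoder never
 * continues with a half-read instruction stream:
 *
 *      FNIEMOP_DEF(iemOp_Example)
 *      {
 *          uint8_t bRm;
 *          IEM_OPCODE_GET_NEXT_U8(&bRm);   // returns rcStrict2 on failure
 *          NOREF(bRm);
 *          return VINF_SUCCESS;
 *      }
 */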
1261
1262
1263/**
1264 * Fetches the next signed byte from the opcode stream.
1265 *
1266 * @returns Strict VBox status code.
1267 * @param pIemCpu The IEM state.
1268 * @param pi8 Where to return the signed byte.
1269 */
1270DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1271{
1272 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1273}
1274
1275
1276/**
1277 * Fetches the next signed byte from the opcode stream, returning automatically
1278 * on failure.
1279 *
1280 * @param pi8 Where to return the signed byte.
1281 * @remark Implicitly references pIemCpu.
1282 */
1283#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1284 do \
1285 { \
1286 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1287 if (rcStrict2 != VINF_SUCCESS) \
1288 return rcStrict2; \
1289 } while (0)
1290
1291
1292/**
1293 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1294 *
1295 * @returns Strict VBox status code.
1296 * @param pIemCpu The IEM state.
1297 * @param pu16 Where to return the opcode word.
1298 */
1299DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1300{
1301 uint8_t u8;
1302 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1303 if (rcStrict == VINF_SUCCESS)
1304 *pu16 = (int8_t)u8;
1305 return rcStrict;
1306}
1307
1308
1309/**
1310 * Fetches the next signed byte from the opcode stream, extending it to
1311 * unsigned 16-bit.
1312 *
1313 * @returns Strict VBox status code.
1314 * @param pIemCpu The IEM state.
1315 * @param pu16 Where to return the unsigned word.
1316 */
1317DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1318{
1319 uint8_t const offOpcode = pIemCpu->offOpcode;
1320 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1321 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1322
1323 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1324 pIemCpu->offOpcode = offOpcode + 1;
1325 return VINF_SUCCESS;
1326}
1327
1328
1329/**
1330 * Fetches the next signed byte from the opcode stream and sign-extends it to
1331 * a word, returning automatically on failure.
1332 *
1333 * @param pu16 Where to return the word.
1334 * @remark Implicitly references pIemCpu.
1335 */
1336#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1337 do \
1338 { \
1339 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1340 if (rcStrict2 != VINF_SUCCESS) \
1341 return rcStrict2; \
1342 } while (0)
1343
1344
1345/**
1346 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1347 *
1348 * @returns Strict VBox status code.
1349 * @param pIemCpu The IEM state.
1350 * @param pu32 Where to return the opcode dword.
1351 */
1352DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1353{
1354 uint8_t u8;
1355 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1356 if (rcStrict == VINF_SUCCESS)
1357 *pu32 = (int8_t)u8;
1358 return rcStrict;
1359}
1360
1361
1362/**
1363 * Fetches the next signed byte from the opcode stream, extending it to
1364 * unsigned 32-bit.
1365 *
1366 * @returns Strict VBox status code.
1367 * @param pIemCpu The IEM state.
1368 * @param pu32 Where to return the unsigned dword.
1369 */
1370DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1371{
1372 uint8_t const offOpcode = pIemCpu->offOpcode;
1373 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1374 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1375
1376 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1377 pIemCpu->offOpcode = offOpcode + 1;
1378 return VINF_SUCCESS;
1379}
1380
1381
1382/**
1383 * Fetches the next signed byte from the opcode stream and sign-extends it to
1384 * a double word, returning automatically on failure.
1385 *
1386 * @param pu32 Where to return the double word.
1387 * @remark Implicitly references pIemCpu.
1388 */
1389#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1390 do \
1391 { \
1392 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1393 if (rcStrict2 != VINF_SUCCESS) \
1394 return rcStrict2; \
1395 } while (0)
1396
1397
1398/**
1399 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pIemCpu The IEM state.
1403 * @param pu64 Where to return the opcode qword.
1404 */
1405DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1406{
1407 uint8_t u8;
1408 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1409 if (rcStrict == VINF_SUCCESS)
1410 *pu64 = (int8_t)u8;
1411 return rcStrict;
1412}
1413
1414
1415/**
1416 * Fetches the next signed byte from the opcode stream, extending it to
1417 * unsigned 64-bit.
1418 *
1419 * @returns Strict VBox status code.
1420 * @param pIemCpu The IEM state.
1421 * @param pu64 Where to return the unsigned qword.
1422 */
1423DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1424{
1425 uint8_t const offOpcode = pIemCpu->offOpcode;
1426 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1427 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1428
1429 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1430 pIemCpu->offOpcode = offOpcode + 1;
1431 return VINF_SUCCESS;
1432}
1433
1434
1435/**
1436 * Fetches the next signed byte from the opcode stream and sign-extends it to
1437 * a quad word, returning automatically on failure.
1438 *
1439 * @param pu64 Where to return the quad word.
1440 * @remark Implicitly references pIemCpu.
1441 */
1442#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1443 do \
1444 { \
1445 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1446 if (rcStrict2 != VINF_SUCCESS) \
1447 return rcStrict2; \
1448 } while (0)
1449
1450
1451/**
1452 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1453 *
1454 * @returns Strict VBox status code.
1455 * @param pIemCpu The IEM state.
1456 * @param pu16 Where to return the opcode word.
1457 */
1458DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1459{
1460 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1461 if (rcStrict == VINF_SUCCESS)
1462 {
1463 uint8_t offOpcode = pIemCpu->offOpcode;
1464 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1465 pIemCpu->offOpcode = offOpcode + 2;
1466 }
1467 else
1468 *pu16 = 0;
1469 return rcStrict;
1470}
1471
1472
1473/**
1474 * Fetches the next opcode word.
1475 *
1476 * @returns Strict VBox status code.
1477 * @param pIemCpu The IEM state.
1478 * @param pu16 Where to return the opcode word.
1479 */
1480DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1481{
1482 uint8_t const offOpcode = pIemCpu->offOpcode;
1483 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1484 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1485
1486 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1487 pIemCpu->offOpcode = offOpcode + 2;
1488 return VINF_SUCCESS;
1489}
1490
1491
1492/**
1493 * Fetches the next opcode word, returns automatically on failure.
1494 *
1495 * @param a_pu16 Where to return the opcode word.
1496 * @remark Implicitly references pIemCpu.
1497 */
1498#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1499 do \
1500 { \
1501 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1502 if (rcStrict2 != VINF_SUCCESS) \
1503 return rcStrict2; \
1504 } while (0)
1505
1506
1507/**
1508 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1509 *
1510 * @returns Strict VBox status code.
1511 * @param pIemCpu The IEM state.
1512 * @param pu32 Where to return the opcode double word.
1513 */
1514DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1515{
1516 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1517 if (rcStrict == VINF_SUCCESS)
1518 {
1519 uint8_t offOpcode = pIemCpu->offOpcode;
1520 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1521 pIemCpu->offOpcode = offOpcode + 2;
1522 }
1523 else
1524 *pu32 = 0;
1525 return rcStrict;
1526}
1527
1528
1529/**
1530 * Fetches the next opcode word, zero extending it to a double word.
1531 *
1532 * @returns Strict VBox status code.
1533 * @param pIemCpu The IEM state.
1534 * @param pu32 Where to return the opcode double word.
1535 */
1536DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1537{
1538 uint8_t const offOpcode = pIemCpu->offOpcode;
1539 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1540 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1541
1542 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1543 pIemCpu->offOpcode = offOpcode + 2;
1544 return VINF_SUCCESS;
1545}
1546
1547
1548/**
1549 * Fetches the next opcode word and zero extends it to a double word, returns
1550 * automatically on failure.
1551 *
1552 * @param a_pu32 Where to return the opcode double word.
1553 * @remark Implicitly references pIemCpu.
1554 */
1555#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1556 do \
1557 { \
1558 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1559 if (rcStrict2 != VINF_SUCCESS) \
1560 return rcStrict2; \
1561 } while (0)
1562
1563
1564/**
1565 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1566 *
1567 * @returns Strict VBox status code.
1568 * @param pIemCpu The IEM state.
1569 * @param pu64 Where to return the opcode quad word.
1570 */
1571DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1572{
1573 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1574 if (rcStrict == VINF_SUCCESS)
1575 {
1576 uint8_t offOpcode = pIemCpu->offOpcode;
1577 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1578 pIemCpu->offOpcode = offOpcode + 2;
1579 }
1580 else
1581 *pu64 = 0;
1582 return rcStrict;
1583}
1584
1585
1586/**
1587 * Fetches the next opcode word, zero extending it to a quad word.
1588 *
1589 * @returns Strict VBox status code.
1590 * @param pIemCpu The IEM state.
1591 * @param pu64 Where to return the opcode quad word.
1592 */
1593DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1594{
1595 uint8_t const offOpcode = pIemCpu->offOpcode;
1596 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1597 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1598
1599 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1600 pIemCpu->offOpcode = offOpcode + 2;
1601 return VINF_SUCCESS;
1602}
1603
1604
1605/**
1606 * Fetches the next opcode word and zero extends it to a quad word, returns
1607 * automatically on failure.
1608 *
1609 * @param a_pu64 Where to return the opcode quad word.
1610 * @remark Implicitly references pIemCpu.
1611 */
1612#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1613 do \
1614 { \
1615 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1616 if (rcStrict2 != VINF_SUCCESS) \
1617 return rcStrict2; \
1618 } while (0)
1619
1620
1621/**
1622 * Fetches the next signed word from the opcode stream.
1623 *
1624 * @returns Strict VBox status code.
1625 * @param pIemCpu The IEM state.
1626 * @param pi16 Where to return the signed word.
1627 */
1628DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1629{
1630 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1631}
1632
1633
1634/**
1635 * Fetches the next signed word from the opcode stream, returning automatically
1636 * on failure.
1637 *
1638 * @param a_pi16 Where to return the signed word.
1639 * @remark Implicitly references pIemCpu.
1640 */
1641#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1642 do \
1643 { \
1644 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1645 if (rcStrict2 != VINF_SUCCESS) \
1646 return rcStrict2; \
1647 } while (0)
1648
1649
1650/**
1651 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1652 *
1653 * @returns Strict VBox status code.
1654 * @param pIemCpu The IEM state.
1655 * @param pu32 Where to return the opcode dword.
1656 */
1657DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1658{
1659 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1660 if (rcStrict == VINF_SUCCESS)
1661 {
1662 uint8_t offOpcode = pIemCpu->offOpcode;
1663 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1664 pIemCpu->abOpcode[offOpcode + 1],
1665 pIemCpu->abOpcode[offOpcode + 2],
1666 pIemCpu->abOpcode[offOpcode + 3]);
1667 pIemCpu->offOpcode = offOpcode + 4;
1668 }
1669 else
1670 *pu32 = 0;
1671 return rcStrict;
1672}
1673
1674
1675/**
1676 * Fetches the next opcode dword.
1677 *
1678 * @returns Strict VBox status code.
1679 * @param pIemCpu The IEM state.
1680 * @param pu32 Where to return the opcode double word.
1681 */
1682DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1683{
1684 uint8_t const offOpcode = pIemCpu->offOpcode;
1685 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1686 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1687
1688 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1689 pIemCpu->abOpcode[offOpcode + 1],
1690 pIemCpu->abOpcode[offOpcode + 2],
1691 pIemCpu->abOpcode[offOpcode + 3]);
1692 pIemCpu->offOpcode = offOpcode + 4;
1693 return VINF_SUCCESS;
1694}
1695
1696
1697/**
1698 * Fetches the next opcode dword, returns automatically on failure.
1699 *
1700 * @param a_pu32 Where to return the opcode dword.
1701 * @remark Implicitly references pIemCpu.
1702 */
1703#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1704 do \
1705 { \
1706 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1707 if (rcStrict2 != VINF_SUCCESS) \
1708 return rcStrict2; \
1709 } while (0)
1710
1711
1712/**
1713 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1714 *
1715 * @returns Strict VBox status code.
1716 * @param pIemCpu The IEM state.
1717 * @param pu64 Where to return the opcode quad word.
1718 */
1719DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1720{
1721 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1722 if (rcStrict == VINF_SUCCESS)
1723 {
1724 uint8_t offOpcode = pIemCpu->offOpcode;
1725 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1726 pIemCpu->abOpcode[offOpcode + 1],
1727 pIemCpu->abOpcode[offOpcode + 2],
1728 pIemCpu->abOpcode[offOpcode + 3]);
1729 pIemCpu->offOpcode = offOpcode + 4;
1730 }
1731 else
1732 *pu64 = 0;
1733 return rcStrict;
1734}
1735
1736
1737/**
1738 * Fetches the next opcode dword, zero extending it to a quad word.
1739 *
1740 * @returns Strict VBox status code.
1741 * @param pIemCpu The IEM state.
1742 * @param pu64 Where to return the opcode quad word.
1743 */
1744DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1745{
1746 uint8_t const offOpcode = pIemCpu->offOpcode;
1747 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1748 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1749
1750 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1751 pIemCpu->abOpcode[offOpcode + 1],
1752 pIemCpu->abOpcode[offOpcode + 2],
1753 pIemCpu->abOpcode[offOpcode + 3]);
1754 pIemCpu->offOpcode = offOpcode + 4;
1755 return VINF_SUCCESS;
1756}
1757
1758
1759/**
1760 * Fetches the next opcode dword and zero extends it to a quad word, returns
1761 * automatically on failure.
1762 *
1763 * @param a_pu64 Where to return the opcode quad word.
1764 * @remark Implicitly references pIemCpu.
1765 */
1766#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1767 do \
1768 { \
1769 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1770 if (rcStrict2 != VINF_SUCCESS) \
1771 return rcStrict2; \
1772 } while (0)
1773
1774
1775/**
1776 * Fetches the next signed double word from the opcode stream.
1777 *
1778 * @returns Strict VBox status code.
1779 * @param pIemCpu The IEM state.
1780 * @param pi32 Where to return the signed double word.
1781 */
1782DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1783{
1784 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1785}
1786
1787/**
1788 * Fetches the next signed double word from the opcode stream, returning
1789 * automatically on failure.
1790 *
1791 * @param a_pi32 Where to return the signed double word.
1792 * @remark Implicitly references pIemCpu.
1793 */
1794#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1795 do \
1796 { \
1797 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1798 if (rcStrict2 != VINF_SUCCESS) \
1799 return rcStrict2; \
1800 } while (0)
1801
1802
1803/**
1804 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1805 *
1806 * @returns Strict VBox status code.
1807 * @param pIemCpu The IEM state.
1808 * @param pu64 Where to return the opcode qword.
1809 */
1810DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1811{
1812 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1813 if (rcStrict == VINF_SUCCESS)
1814 {
1815 uint8_t offOpcode = pIemCpu->offOpcode;
1816 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1817 pIemCpu->abOpcode[offOpcode + 1],
1818 pIemCpu->abOpcode[offOpcode + 2],
1819 pIemCpu->abOpcode[offOpcode + 3]);
1820 pIemCpu->offOpcode = offOpcode + 4;
1821 }
1822 else
1823 *pu64 = 0;
1824 return rcStrict;
1825}
1826
1827
1828/**
1829 * Fetches the next opcode dword, sign extending it into a quad word.
1830 *
1831 * @returns Strict VBox status code.
1832 * @param pIemCpu The IEM state.
1833 * @param pu64 Where to return the opcode quad word.
1834 */
1835DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1836{
1837 uint8_t const offOpcode = pIemCpu->offOpcode;
1838 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1839 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1840
1841 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1842 pIemCpu->abOpcode[offOpcode + 1],
1843 pIemCpu->abOpcode[offOpcode + 2],
1844 pIemCpu->abOpcode[offOpcode + 3]);
1845 *pu64 = i32;
1846 pIemCpu->offOpcode = offOpcode + 4;
1847 return VINF_SUCCESS;
1848}
1849
1850
1851/**
1852 * Fetches the next opcode double word and sign extends it to a quad word,
1853 * returns automatically on failure.
1854 *
1855 * @param a_pu64 Where to return the opcode quad word.
1856 * @remark Implicitly references pIemCpu.
1857 */
1858#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1859 do \
1860 { \
1861 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1862 if (rcStrict2 != VINF_SUCCESS) \
1863 return rcStrict2; \
1864 } while (0)
1865
1866
1867/**
1868 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1869 *
1870 * @returns Strict VBox status code.
1871 * @param pIemCpu The IEM state.
1872 * @param pu64 Where to return the opcode qword.
1873 */
1874DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1875{
1876 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1877 if (rcStrict == VINF_SUCCESS)
1878 {
1879 uint8_t offOpcode = pIemCpu->offOpcode;
1880 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1881 pIemCpu->abOpcode[offOpcode + 1],
1882 pIemCpu->abOpcode[offOpcode + 2],
1883 pIemCpu->abOpcode[offOpcode + 3],
1884 pIemCpu->abOpcode[offOpcode + 4],
1885 pIemCpu->abOpcode[offOpcode + 5],
1886 pIemCpu->abOpcode[offOpcode + 6],
1887 pIemCpu->abOpcode[offOpcode + 7]);
1888 pIemCpu->offOpcode = offOpcode + 8;
1889 }
1890 else
1891 *pu64 = 0;
1892 return rcStrict;
1893}
1894
1895
1896/**
1897 * Fetches the next opcode qword.
1898 *
1899 * @returns Strict VBox status code.
1900 * @param pIemCpu The IEM state.
1901 * @param pu64 Where to return the opcode qword.
1902 */
1903DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1904{
1905 uint8_t const offOpcode = pIemCpu->offOpcode;
1906 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1907 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1908
1909 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1910 pIemCpu->abOpcode[offOpcode + 1],
1911 pIemCpu->abOpcode[offOpcode + 2],
1912 pIemCpu->abOpcode[offOpcode + 3],
1913 pIemCpu->abOpcode[offOpcode + 4],
1914 pIemCpu->abOpcode[offOpcode + 5],
1915 pIemCpu->abOpcode[offOpcode + 6],
1916 pIemCpu->abOpcode[offOpcode + 7]);
1917 pIemCpu->offOpcode = offOpcode + 8;
1918 return VINF_SUCCESS;
1919}
1920
1921
1922/**
1923 * Fetches the next opcode quad word, returns automatically on failure.
1924 *
1925 * @param a_pu64 Where to return the opcode quad word.
1926 * @remark Implicitly references pIemCpu.
1927 */
1928#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1929 do \
1930 { \
1931 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1932 if (rcStrict2 != VINF_SUCCESS) \
1933 return rcStrict2; \
1934 } while (0)
1935
1936
1937/** @name Misc Worker Functions.
1938 * @{
1939 */
1940
1941
1942/**
1943 * Validates a new SS segment.
1944 *
1945 * @returns VBox strict status code.
1946 * @param pIemCpu The IEM per CPU instance data.
1947 * @param pCtx The CPU context.
1948 * @param NewSS The new SS selector.
1949 * @param uCpl The CPL to load the stack for.
1950 * @param pDesc Where to return the descriptor.
1951 */
1952IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1953{
1954 NOREF(pCtx);
1955
1956 /* Null selectors are not allowed (we're not called for dispatching
1957 interrupts with SS=0 in long mode). */
1958 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1959 {
1960 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1961 return iemRaiseTaskSwitchFault0(pIemCpu);
1962 }
1963
1964 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1965 if ((NewSS & X86_SEL_RPL) != uCpl)
1966 {
1967 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1968 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1969 }
1970
1971 /*
1972 * Read the descriptor.
1973 */
1974 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1975 if (rcStrict != VINF_SUCCESS)
1976 return rcStrict;
1977
1978 /*
1979 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1980 */
1981 if (!pDesc->Legacy.Gen.u1DescType)
1982 {
1983 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1984 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1985 }
1986
1987 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1988 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1989 {
1990 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1991 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1992 }
1993 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1994 {
1995 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1996 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1997 }
1998
1999 /* Is it there? */
2000 /** @todo testcase: Is this checked before the canonical / limit check below? */
2001 if (!pDesc->Legacy.Gen.u1Present)
2002 {
2003 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2004 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2005 }
2006
2007 return VINF_SUCCESS;
2008}
2009
2010
2011/**
2012 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2013 * not.
2014 *
2015 * @param a_pIemCpu The IEM per CPU data.
2016 * @param a_pCtx The CPU context.
2017 */
2018#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2019# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2020 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2021 ? (a_pCtx)->eflags.u \
2022 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2023#else
2024# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2025 ( (a_pCtx)->eflags.u )
2026#endif
2027
2028/**
2029 * Updates the EFLAGS in the correct manner wrt. PATM.
2030 *
2031 * @param a_pIemCpu The IEM per CPU data.
2032 * @param a_pCtx The CPU context.
2033 */
2034#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2035# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2036 do { \
2037 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2038 (a_pCtx)->eflags.u = (a_fEfl); \
2039 else \
2040 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2041 } while (0)
2042#else
2043# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2044 do { \
2045 (a_pCtx)->eflags.u = (a_fEfl); \
2046 } while (0)
2047#endif
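/* Illustrative read-modify-write sketch -- not part of the original source.
 * Guest EFLAGS accesses go through the two macros above so that raw-mode/PATM
 * gets to supply or receive the bits it caches; pIemCpu and pCtx are assumed
 * to be in scope. */
#if 0
uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
fEfl &= ~X86_EFL_IF;                    /* e.g. mask interrupts during event delivery */
IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
#endif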
2048
2049
2050/** @} */
2051
2052/** @name Raising Exceptions.
2053 *
2054 * @{
2055 */
2056
2057/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2058 * @{ */
2059/** CPU exception. */
2060#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2061/** External interrupt (from PIC, APIC, whatever). */
2062#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2063/** Software interrupt (int or into, not bound).
2064 * Returns to the following instruction */
2065#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2066/** Takes an error code. */
2067#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2068/** Takes a CR2. */
2069#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2070/** Generated by the breakpoint instruction. */
2071#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2072/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2073#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2074/** @} */
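/* Illustrative flag combinations -- not part of the original source, only an
 * assumption about typical usage: a CPU exception such as #PF carries both an
 * error code and a CR2 value, an external interrupt carries neither, and the
 * breakpoint instruction is treated as a software interrupt. */
#if 0
uint32_t const fPageFault = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2;
uint32_t const fExtIrq    = IEM_XCPT_FLAGS_T_EXT_INT;
uint32_t const fInt3      = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR;
#endif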
2075
2076
2077/**
2078 * Loads the specified stack far pointer from the TSS.
2079 *
2080 * @returns VBox strict status code.
2081 * @param pIemCpu The IEM per CPU instance data.
2082 * @param pCtx The CPU context.
2083 * @param uCpl The CPL to load the stack for.
2084 * @param pSelSS Where to return the new stack segment.
2085 * @param puEsp Where to return the new stack pointer.
2086 */
2087IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2088 PRTSEL pSelSS, uint32_t *puEsp)
2089{
2090 VBOXSTRICTRC rcStrict;
2091 Assert(uCpl < 4);
2092 *puEsp = 0; /* make gcc happy */
2093 *pSelSS = 0; /* make gcc happy */
2094
2095 switch (pCtx->tr.Attr.n.u4Type)
2096 {
2097 /*
2098 * 16-bit TSS (X86TSS16).
2099 */
2100 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2101 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2102 {
2103 uint32_t off = uCpl * 4 + 2;
2104 if (off + 4 > pCtx->tr.u32Limit)
2105 {
2106 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2107 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2108 }
2109
2110 uint32_t u32Tmp = 0; /* gcc maybe... */
2111 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2112 if (rcStrict == VINF_SUCCESS)
2113 {
2114 *puEsp = RT_LOWORD(u32Tmp);
2115 *pSelSS = RT_HIWORD(u32Tmp);
2116 return VINF_SUCCESS;
2117 }
2118 break;
2119 }
2120
2121 /*
2122 * 32-bit TSS (X86TSS32).
2123 */
2124 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2125 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2126 {
2127 uint32_t off = uCpl * 8 + 4;
2128 if (off + 7 > pCtx->tr.u32Limit)
2129 {
2130 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2131 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2132 }
2133
2134 uint64_t u64Tmp;
2135 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2136 if (rcStrict == VINF_SUCCESS)
2137 {
2138 *puEsp = u64Tmp & UINT32_MAX;
2139 *pSelSS = (RTSEL)(u64Tmp >> 32);
2140 return VINF_SUCCESS;
2141 }
2142 break;
2143 }
2144
2145 default:
2146 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2147 }
2148 return rcStrict;
2149}
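/* Worked offset example for the function above (added note, assuming the
 * standard X86TSS16/X86TSS32 layouts): in a 16-bit TSS the sp0/ss0 pair starts
 * at offset 2 and each ring's pair is 4 bytes, hence off = uCpl * 4 + 2; in a
 * 32-bit TSS esp0 starts at offset 4 and each ring's esp/ss slot is 8 bytes,
 * hence off = uCpl * 8 + 4 (e.g. uCpl=1 gives offset 12, the esp1 field). */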
2150
2151
2152/**
2153 * Loads the specified stack pointer from the 64-bit TSS.
2154 *
2155 * @returns VBox strict status code.
2156 * @param pIemCpu The IEM per CPU instance data.
2157 * @param pCtx The CPU context.
2158 * @param uCpl The CPL to load the stack for.
2159 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2160 * @param puRsp Where to return the new stack pointer.
2161 */
2162IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2163{
2164 Assert(uCpl < 4);
2165 Assert(uIst < 8);
2166 *puRsp = 0; /* make gcc happy */
2167
2168 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
2169
2170 uint32_t off;
2171 if (uIst)
2172 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2173 else
2174 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2175 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2176 {
2177 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2178 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2179 }
2180
2181 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2182}
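/* Worked offset example for the 64-bit TSS (added note, assuming the standard
 * X86TSS64 layout): rsp0 starts at offset 4, so uCpl=2 with uIst=0 gives
 * off = 2 * 8 + 4 = 20, while ist1 starts at offset 36, so uIst=3 gives
 * off = (3 - 1) * 8 + 36 = 52. */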
2183
2184
2185/**
2186 * Adjust the CPU state according to the exception being raised.
2187 *
2188 * @param pCtx The CPU context.
2189 * @param u8Vector The exception that has been raised.
2190 */
2191DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2192{
2193 switch (u8Vector)
2194 {
2195 case X86_XCPT_DB:
2196 pCtx->dr[7] &= ~X86_DR7_GD;
2197 break;
2198 /** @todo Read the AMD and Intel exception reference... */
2199 }
2200}
2201
2202
2203/**
2204 * Implements exceptions and interrupts for real mode.
2205 *
2206 * @returns VBox strict status code.
2207 * @param pIemCpu The IEM per CPU instance data.
2208 * @param pCtx The CPU context.
2209 * @param cbInstr The number of bytes to offset rIP by in the return
2210 * address.
2211 * @param u8Vector The interrupt / exception vector number.
2212 * @param fFlags The flags.
2213 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2214 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2215 */
2216IEM_STATIC VBOXSTRICTRC
2217iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2218 PCPUMCTX pCtx,
2219 uint8_t cbInstr,
2220 uint8_t u8Vector,
2221 uint32_t fFlags,
2222 uint16_t uErr,
2223 uint64_t uCr2)
2224{
2225 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2226 NOREF(uErr); NOREF(uCr2);
2227
2228 /*
2229 * Read the IDT entry.
2230 */
2231 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2232 {
2233 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2234 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2235 }
2236 RTFAR16 Idte;
2237 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2238 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2239 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2240 return rcStrict;
2241
2242 /*
2243 * Push the stack frame.
2244 */
2245 uint16_t *pu16Frame;
2246 uint64_t uNewRsp;
2247 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2248 if (rcStrict != VINF_SUCCESS)
2249 return rcStrict;
2250
2251 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2252 pu16Frame[2] = (uint16_t)fEfl;
2253 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2254 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2255 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2256 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2257 return rcStrict;
2258
2259 /*
2260 * Load the vector address into cs:ip and make exception specific state
2261 * adjustments.
2262 */
2263 pCtx->cs.Sel = Idte.sel;
2264 pCtx->cs.ValidSel = Idte.sel;
2265 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2266 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2267 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2268 pCtx->rip = Idte.off;
2269 fEfl &= ~X86_EFL_IF;
2270 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2271
2272 /** @todo do we actually do this in real mode? */
2273 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2274 iemRaiseXcptAdjustState(pCtx, u8Vector);
2275
2276 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2277}
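/* Worked example for the real-mode path above (added note): each IVT entry is
 * a 4 byte offset:segment pair, so vector 0x21 lives at idtr.pIdt + 0x84 and
 * the bounds check requires cbIdt to cover its last byte at offset 0x87.  The
 * 6 byte stack frame is, from low to high address: return IP, return CS, FLAGS. */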
2278
2279
2280/**
2281 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2282 *
2283 * @param pIemCpu The IEM per CPU instance data.
2284 * @param pSReg Pointer to the segment register.
2285 */
2286IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2287{
2288 pSReg->Sel = 0;
2289 pSReg->ValidSel = 0;
2290 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2291 {
2292 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2293 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2294 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2295 }
2296 else
2297 {
2298 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2299 /** @todo check this on AMD-V */
2300 pSReg->u64Base = 0;
2301 pSReg->u32Limit = 0;
2302 }
2303}
2304
2305
2306/**
2307 * Loads a segment selector during a task switch in V8086 mode.
2308 *
2309 * @param pIemCpu The IEM per CPU instance data.
2310 * @param pSReg Pointer to the segment register.
2311 * @param uSel The selector value to load.
2312 */
2313IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2314{
2315 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2316 pSReg->Sel = uSel;
2317 pSReg->ValidSel = uSel;
2318 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2319 pSReg->u64Base = uSel << 4;
2320 pSReg->u32Limit = 0xffff;
2321 pSReg->Attr.u = 0xf3;
2322}
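/* Worked example for the V8086 load above (added note): a selector of 0xb800
 * yields a base of 0xb8000 (uSel << 4), a 64 KB limit, and attribute byte 0xf3,
 * i.e. a present, DPL=3, read/write accessed data segment. */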
2323
2324
2325/**
2326 * Loads a NULL data selector into a selector register, both the hidden and
2327 * visible parts, in protected mode.
2328 *
2329 * @param pIemCpu The IEM state of the calling EMT.
2330 * @param pSReg Pointer to the segment register.
2331 * @param uRpl The RPL.
2332 */
2333IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2334{
2335 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2336 * data selector in protected mode. */
2337 pSReg->Sel = uRpl;
2338 pSReg->ValidSel = uRpl;
2339 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2340 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2341 {
2342 /* VT-x (Intel 3960x) observed doing something like this. */
2343 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2344 pSReg->u32Limit = UINT32_MAX;
2345 pSReg->u64Base = 0;
2346 }
2347 else
2348 {
2349 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2350 pSReg->u32Limit = 0;
2351 pSReg->u64Base = 0;
2352 }
2353}
2354
2355
2356/**
2357 * Loads a segment selector during a task switch in protected mode. In this task
2358 * switch scenario, #TS exceptions are raised rather than #GPs.
2359 *
2360 * @returns VBox strict status code.
2361 * @param pIemCpu The IEM per CPU instance data.
2362 * @param pSReg Pointer to the segment register.
2363 * @param uSel The new selector value.
2364 *
2365 * @remarks This does -NOT- handle CS or SS.
2366 * @remarks This expects pIemCpu->uCpl to be up to date.
2367 */
2368IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2369{
2370 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2371
2372 /* Null data selector. */
2373 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2374 {
2375 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2376 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2377 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2378 return VINF_SUCCESS;
2379 }
2380
2381 /* Fetch the descriptor. */
2382 IEMSELDESC Desc;
2383 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2384 if (rcStrict != VINF_SUCCESS)
2385 {
2386 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2387 VBOXSTRICTRC_VAL(rcStrict)));
2388 return rcStrict;
2389 }
2390
2391 /* Must be a data segment or readable code segment. */
2392 if ( !Desc.Legacy.Gen.u1DescType
2393 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2394 {
2395 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2396 Desc.Legacy.Gen.u4Type));
2397 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2398 }
2399
2400 /* Check privileges for data segments and non-conforming code segments. */
2401 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2402 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2403 {
2404 /* The RPL and the new CPL must be less than or equal to the DPL. */
2405 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2406 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2407 {
2408 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2409 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2410 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2411 }
2412 }
2413
2414 /* Is it there? */
2415 if (!Desc.Legacy.Gen.u1Present)
2416 {
2417 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2418 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2419 }
2420
2421 /* The base and limit. */
2422 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2423 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2424
2425 /*
2426 * Ok, everything checked out fine. Now set the accessed bit before
2427 * committing the result into the registers.
2428 */
2429 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2430 {
2431 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2432 if (rcStrict != VINF_SUCCESS)
2433 return rcStrict;
2434 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2435 }
2436
2437 /* Commit */
2438 pSReg->Sel = uSel;
2439 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2440 pSReg->u32Limit = cbLimit;
2441 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2442 pSReg->ValidSel = uSel;
2443 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2444 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2445 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2446
2447 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2448 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2449 return VINF_SUCCESS;
2450}
2451
2452
2453/**
2454 * Performs a task switch.
2455 *
2456 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2457 * caller is responsible for performing the necessary checks (like DPL, TSS
2458 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2459 * reference for JMP, CALL, IRET.
2460 *
2461 * If the task switch is due to a software interrupt or hardware exception,
2462 * the caller is responsible for validating the TSS selector and descriptor. See
2463 * Intel Instruction reference for INT n.
2464 *
2465 * @returns VBox strict status code.
2466 * @param pIemCpu The IEM per CPU instance data.
2467 * @param pCtx The CPU context.
2468 * @param enmTaskSwitch What caused this task switch.
2469 * @param uNextEip The EIP effective after the task switch.
2470 * @param fFlags The flags.
2471 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2472 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2473 * @param SelTSS The TSS selector of the new task.
2474 * @param pNewDescTSS Pointer to the new TSS descriptor.
2475 */
2476IEM_STATIC VBOXSTRICTRC
2477iemTaskSwitch(PIEMCPU pIemCpu,
2478 PCPUMCTX pCtx,
2479 IEMTASKSWITCH enmTaskSwitch,
2480 uint32_t uNextEip,
2481 uint32_t fFlags,
2482 uint16_t uErr,
2483 uint64_t uCr2,
2484 RTSEL SelTSS,
2485 PIEMSELDESC pNewDescTSS)
2486{
2487 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2488 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2489
2490 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2491 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2492 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2493 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2494 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2495
2496 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2497 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2498
2499 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2500 fIsNewTSS386, pCtx->eip, uNextEip));
2501
2502 /* Update CR2 in case it's a page-fault. */
2503 /** @todo This should probably be done much earlier in IEM/PGM. See
2504 * @bugref{5653} comment #49. */
2505 if (fFlags & IEM_XCPT_FLAGS_CR2)
2506 pCtx->cr2 = uCr2;
2507
2508 /*
2509 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2510 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2511 */
2512 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2513 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
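 /* Added note: the minimum limits are 0x2b for a 16-bit TSS (44 bytes) and
    0x67 for a 32-bit TSS (104 bytes), matching the architectural TSS sizes. */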
2514 if (uNewTSSLimit < uNewTSSLimitMin)
2515 {
2516 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2517 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2518 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2519 }
2520
2521 /*
2522 * Check the current TSS limit. The last written byte to the current TSS during the
2523 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2524 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2525 *
2526 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2527 * end up with smaller than "legal" TSS limits.
2528 */
2529 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2530 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2531 if (uCurTSSLimit < uCurTSSLimitMin)
2532 {
2533 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2534 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2535 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2536 }
2537
2538 /*
2539 * Verify that the new TSS can be accessed and map it. Map only the required contents
2540 * and not the entire TSS.
2541 */
2542 void *pvNewTSS;
2543 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2544 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2545 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2546 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2547 * not perform correct translation if this happens. See Intel spec. 7.2.1
2548 * "Task-State Segment" */
2549 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2550 if (rcStrict != VINF_SUCCESS)
2551 {
2552 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2553 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2554 return rcStrict;
2555 }
2556
2557 /*
2558 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2559 */
2560 uint32_t u32EFlags = pCtx->eflags.u32;
2561 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2562 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2563 {
2564 PX86DESC pDescCurTSS;
2565 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2566 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2567 if (rcStrict != VINF_SUCCESS)
2568 {
2569 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2570 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2571 return rcStrict;
2572 }
2573
2574 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2575 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2576 if (rcStrict != VINF_SUCCESS)
2577 {
2578 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2579 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2580 return rcStrict;
2581 }
2582
2583 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2584 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2585 {
2586 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2587 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2588 u32EFlags &= ~X86_EFL_NT;
2589 }
2590 }
2591
2592 /*
2593 * Save the CPU state into the current TSS.
2594 */
2595 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2596 if (GCPtrNewTSS == GCPtrCurTSS)
2597 {
2598 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2599 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2600 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2601 }
2602 if (fIsNewTSS386)
2603 {
2604 /*
2605 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2606 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2607 */
2608 void *pvCurTSS32;
2609 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2610 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2611 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2612 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2613 if (rcStrict != VINF_SUCCESS)
2614 {
2615 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2616 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2617 return rcStrict;
2618 }
2619
2620 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
2621 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2622 pCurTSS32->eip = uNextEip;
2623 pCurTSS32->eflags = u32EFlags;
2624 pCurTSS32->eax = pCtx->eax;
2625 pCurTSS32->ecx = pCtx->ecx;
2626 pCurTSS32->edx = pCtx->edx;
2627 pCurTSS32->ebx = pCtx->ebx;
2628 pCurTSS32->esp = pCtx->esp;
2629 pCurTSS32->ebp = pCtx->ebp;
2630 pCurTSS32->esi = pCtx->esi;
2631 pCurTSS32->edi = pCtx->edi;
2632 pCurTSS32->es = pCtx->es.Sel;
2633 pCurTSS32->cs = pCtx->cs.Sel;
2634 pCurTSS32->ss = pCtx->ss.Sel;
2635 pCurTSS32->ds = pCtx->ds.Sel;
2636 pCurTSS32->fs = pCtx->fs.Sel;
2637 pCurTSS32->gs = pCtx->gs.Sel;
2638
2639 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2640 if (rcStrict != VINF_SUCCESS)
2641 {
2642 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2643 VBOXSTRICTRC_VAL(rcStrict)));
2644 return rcStrict;
2645 }
2646 }
2647 else
2648 {
2649 /*
2650 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2651 */
2652 void *pvCurTSS16;
2653 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2654 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2655 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2656 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2657 if (rcStrict != VINF_SUCCESS)
2658 {
2659 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2660 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2661 return rcStrict;
2662 }
2663
2664 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
2665 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2666 pCurTSS16->ip = uNextEip;
2667 pCurTSS16->flags = u32EFlags;
2668 pCurTSS16->ax = pCtx->ax;
2669 pCurTSS16->cx = pCtx->cx;
2670 pCurTSS16->dx = pCtx->dx;
2671 pCurTSS16->bx = pCtx->bx;
2672 pCurTSS16->sp = pCtx->sp;
2673 pCurTSS16->bp = pCtx->bp;
2674 pCurTSS16->si = pCtx->si;
2675 pCurTSS16->di = pCtx->di;
2676 pCurTSS16->es = pCtx->es.Sel;
2677 pCurTSS16->cs = pCtx->cs.Sel;
2678 pCurTSS16->ss = pCtx->ss.Sel;
2679 pCurTSS16->ds = pCtx->ds.Sel;
2680
2681 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2682 if (rcStrict != VINF_SUCCESS)
2683 {
2684 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2685 VBOXSTRICTRC_VAL(rcStrict)));
2686 return rcStrict;
2687 }
2688 }
2689
2690 /*
2691 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2692 */
2693 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2694 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2695 {
2696 /* Whether it's a 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
2697 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2698 pNewTSS->selPrev = pCtx->tr.Sel;
2699 }
2700
2701 /*
2702 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2703 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2704 */
2705 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2706 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2707 bool fNewDebugTrap;
2708 if (fIsNewTSS386)
2709 {
2710 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2711 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2712 uNewEip = pNewTSS32->eip;
2713 uNewEflags = pNewTSS32->eflags;
2714 uNewEax = pNewTSS32->eax;
2715 uNewEcx = pNewTSS32->ecx;
2716 uNewEdx = pNewTSS32->edx;
2717 uNewEbx = pNewTSS32->ebx;
2718 uNewEsp = pNewTSS32->esp;
2719 uNewEbp = pNewTSS32->ebp;
2720 uNewEsi = pNewTSS32->esi;
2721 uNewEdi = pNewTSS32->edi;
2722 uNewES = pNewTSS32->es;
2723 uNewCS = pNewTSS32->cs;
2724 uNewSS = pNewTSS32->ss;
2725 uNewDS = pNewTSS32->ds;
2726 uNewFS = pNewTSS32->fs;
2727 uNewGS = pNewTSS32->gs;
2728 uNewLdt = pNewTSS32->selLdt;
2729 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2730 }
2731 else
2732 {
2733 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2734 uNewCr3 = 0;
2735 uNewEip = pNewTSS16->ip;
2736 uNewEflags = pNewTSS16->flags;
2737 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2738 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2739 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2740 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2741 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2742 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2743 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2744 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2745 uNewES = pNewTSS16->es;
2746 uNewCS = pNewTSS16->cs;
2747 uNewSS = pNewTSS16->ss;
2748 uNewDS = pNewTSS16->ds;
2749 uNewFS = 0;
2750 uNewGS = 0;
2751 uNewLdt = pNewTSS16->selLdt;
2752 fNewDebugTrap = false;
2753 }
2754
2755 if (GCPtrNewTSS == GCPtrCurTSS)
2756 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2757 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2758
2759 /*
2760 * We're done accessing the new TSS.
2761 */
2762 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2763 if (rcStrict != VINF_SUCCESS)
2764 {
2765 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2766 return rcStrict;
2767 }
2768
2769 /*
2770 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2771 */
2772 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2773 {
2774 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2775 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2776 if (rcStrict != VINF_SUCCESS)
2777 {
2778 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2779 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2780 return rcStrict;
2781 }
2782
2783 /* Check that the descriptor indicates the new TSS is available (not busy). */
2784 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2785 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2786 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2787
2788 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2789 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2790 if (rcStrict != VINF_SUCCESS)
2791 {
2792 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2793 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2794 return rcStrict;
2795 }
2796 }
2797
2798 /*
2799 * From this point on, we're technically in the new task. We will defer exceptions
2800 * until the completion of the task switch but before executing any instructions in the new task.
2801 */
2802 pCtx->tr.Sel = SelTSS;
2803 pCtx->tr.ValidSel = SelTSS;
2804 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2805 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2806 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2807 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2808 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2809
2810 /* Set the busy bit in TR. */
2811 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2812 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2813 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2814 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2815 {
2816 uNewEflags |= X86_EFL_NT;
2817 }
2818
2819 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2820 pCtx->cr0 |= X86_CR0_TS;
2821 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2822
2823 pCtx->eip = uNewEip;
2824 pCtx->eax = uNewEax;
2825 pCtx->ecx = uNewEcx;
2826 pCtx->edx = uNewEdx;
2827 pCtx->ebx = uNewEbx;
2828 pCtx->esp = uNewEsp;
2829 pCtx->ebp = uNewEbp;
2830 pCtx->esi = uNewEsi;
2831 pCtx->edi = uNewEdi;
2832
2833 uNewEflags &= X86_EFL_LIVE_MASK;
2834 uNewEflags |= X86_EFL_RA1_MASK;
2835 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2836
2837 /*
2838 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2839 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2840 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
2841 */
2842 pCtx->es.Sel = uNewES;
2843 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2844 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2845
2846 pCtx->cs.Sel = uNewCS;
2847 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2848 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2849
2850 pCtx->ss.Sel = uNewSS;
2851 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2852 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2853
2854 pCtx->ds.Sel = uNewDS;
2855 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2856 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2857
2858 pCtx->fs.Sel = uNewFS;
2859 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2860 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2861
2862 pCtx->gs.Sel = uNewGS;
2863 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2864 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2865 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2866
2867 pCtx->ldtr.Sel = uNewLdt;
2868 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2869 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2870 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2871
2872 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2873 {
2874 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2875 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2876 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2877 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2878 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2879 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2880 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2881 }
2882
2883 /*
2884 * Switch CR3 for the new task.
2885 */
2886 if ( fIsNewTSS386
2887 && (pCtx->cr0 & X86_CR0_PG))
2888 {
2889 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2890 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2891 {
2892 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2893 AssertRCSuccessReturn(rc, rc);
2894 }
2895 else
2896 pCtx->cr3 = uNewCr3;
2897
2898 /* Inform PGM. */
2899 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2900 {
2901 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2902 AssertRCReturn(rc, rc);
2903 /* ignore informational status codes */
2904 }
2905 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2906 }
2907
2908 /*
2909 * Switch LDTR for the new task.
2910 */
2911 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2912 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2913 else
2914 {
2915 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2916
2917 IEMSELDESC DescNewLdt;
2918 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2919 if (rcStrict != VINF_SUCCESS)
2920 {
2921 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2922 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2923 return rcStrict;
2924 }
2925 if ( !DescNewLdt.Legacy.Gen.u1Present
2926 || DescNewLdt.Legacy.Gen.u1DescType
2927 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2928 {
2929 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2930 uNewLdt, DescNewLdt.Legacy.u));
2931 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2932 }
2933
2934 pCtx->ldtr.ValidSel = uNewLdt;
2935 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2936 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2937 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2938 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2939 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2940 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2941 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2942 }
2943
2944 IEMSELDESC DescSS;
2945 if (IEM_IS_V86_MODE(pIemCpu))
2946 {
2947 pIemCpu->uCpl = 3;
2948 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2949 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2950 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2951 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2952 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2953 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2954 }
2955 else
2956 {
2957 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2958
2959 /*
2960 * Load the stack segment for the new task.
2961 */
2962 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2963 {
2964 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2965 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2966 }
2967
2968 /* Fetch the descriptor. */
2969 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2970 if (rcStrict != VINF_SUCCESS)
2971 {
2972 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2973 VBOXSTRICTRC_VAL(rcStrict)));
2974 return rcStrict;
2975 }
2976
2977 /* SS must be a data segment and writable. */
2978 if ( !DescSS.Legacy.Gen.u1DescType
2979 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2980 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2981 {
2982 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2983 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2984 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2985 }
2986
2987 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2988 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2989 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2990 {
2991 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2992 uNewCpl));
2993 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2994 }
2995
2996 /* Is it there? */
2997 if (!DescSS.Legacy.Gen.u1Present)
2998 {
2999 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3000 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3001 }
3002
3003 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3004 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3005
3006 /* Set the accessed bit before committing the result into SS. */
3007 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3008 {
3009 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3010 if (rcStrict != VINF_SUCCESS)
3011 return rcStrict;
3012 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3013 }
3014
3015 /* Commit SS. */
3016 pCtx->ss.Sel = uNewSS;
3017 pCtx->ss.ValidSel = uNewSS;
3018 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3019 pCtx->ss.u32Limit = cbLimit;
3020 pCtx->ss.u64Base = u64Base;
3021 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3023
3024 /* CPL has changed, update IEM before loading rest of segments. */
3025 pIemCpu->uCpl = uNewCpl;
3026
3027 /*
3028 * Load the data segments for the new task.
3029 */
3030 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3031 if (rcStrict != VINF_SUCCESS)
3032 return rcStrict;
3033 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3034 if (rcStrict != VINF_SUCCESS)
3035 return rcStrict;
3036 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3037 if (rcStrict != VINF_SUCCESS)
3038 return rcStrict;
3039 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3040 if (rcStrict != VINF_SUCCESS)
3041 return rcStrict;
3042
3043 /*
3044 * Load the code segment for the new task.
3045 */
3046 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3047 {
3048 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3049 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3050 }
3051
3052 /* Fetch the descriptor. */
3053 IEMSELDESC DescCS;
3054 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3055 if (rcStrict != VINF_SUCCESS)
3056 {
3057 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3058 return rcStrict;
3059 }
3060
3061 /* CS must be a code segment. */
3062 if ( !DescCS.Legacy.Gen.u1DescType
3063 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3064 {
3065 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3066 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3067 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3068 }
3069
3070 /* For conforming CS, DPL must be less than or equal to the RPL. */
3071 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3072 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3073 {
3074 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3075 DescCS.Legacy.Gen.u2Dpl));
3076 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3077 }
3078
3079 /* For non-conforming CS, DPL must match RPL. */
3080 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3081 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3082 {
3083 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3084 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3085 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3086 }
3087
3088 /* Is it there? */
3089 if (!DescCS.Legacy.Gen.u1Present)
3090 {
3091 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3092 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3093 }
3094
3095 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3096 u64Base = X86DESC_BASE(&DescCS.Legacy);
3097
3098 /* Set the accessed bit before committing the result into CS. */
3099 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3100 {
3101 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3102 if (rcStrict != VINF_SUCCESS)
3103 return rcStrict;
3104 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3105 }
3106
3107 /* Commit CS. */
3108 pCtx->cs.Sel = uNewCS;
3109 pCtx->cs.ValidSel = uNewCS;
3110 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3111 pCtx->cs.u32Limit = cbLimit;
3112 pCtx->cs.u64Base = u64Base;
3113 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3114 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3115 }
3116
3117 /** @todo Debug trap. */
3118 if (fIsNewTSS386 && fNewDebugTrap)
3119 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3120
3121 /*
3122 * Construct the error code masks based on what caused this task switch.
3123 * See Intel Instruction reference for INT.
3124 */
3125 uint16_t uExt;
3126 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3127 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3128 {
3129 uExt = 1;
3130 }
3131 else
3132 uExt = 0;
3133
3134 /*
3135 * Push any error code on to the new stack.
3136 */
3137 if (fFlags & IEM_XCPT_FLAGS_ERR)
3138 {
3139 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3140 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3141 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3142
3143 /* Check that there is sufficient space on the stack. */
3144 /** @todo Factor out segment limit checking for normal/expand down segments
3145 * into a separate function. */
3146 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3147 {
3148 if ( pCtx->esp - 1 > cbLimitSS
3149 || pCtx->esp < cbStackFrame)
3150 {
3151 /** @todo Intel says #SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3152 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3153 cbStackFrame));
3154 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3155 }
3156 }
3157 else
3158 {
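/* Expand-down stack segment: valid offsets run from limit + 1 up to 64KB or 4GB, depending on the B bit. */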
3159 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3160 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3161 {
3162 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3163 cbStackFrame));
3164 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3165 }
3166 }
3167
3168
3169 if (fIsNewTSS386)
3170 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3171 else
3172 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3173 if (rcStrict != VINF_SUCCESS)
3174 {
3175 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3176 VBOXSTRICTRC_VAL(rcStrict)));
3177 return rcStrict;
3178 }
3179 }
3180
3181 /* Check the new EIP against the new CS limit. */
3182 if (pCtx->eip > pCtx->cs.u32Limit)
3183 {
3184 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3185 pCtx->eip, pCtx->cs.u32Limit));
3186 /** @todo Intel says #GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3187 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3188 }
3189
3190 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3191 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3192}
3193
3194
3195/**
3196 * Implements exceptions and interrupts for protected mode.
3197 *
3198 * @returns VBox strict status code.
3199 * @param pIemCpu The IEM per CPU instance data.
3200 * @param pCtx The CPU context.
3201 * @param cbInstr The number of bytes to offset rIP by in the return
3202 * address.
3203 * @param u8Vector The interrupt / exception vector number.
3204 * @param fFlags The flags.
3205 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3206 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3207 */
3208IEM_STATIC VBOXSTRICTRC
3209iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3210 PCPUMCTX pCtx,
3211 uint8_t cbInstr,
3212 uint8_t u8Vector,
3213 uint32_t fFlags,
3214 uint16_t uErr,
3215 uint64_t uCr2)
3216{
3217 /*
3218 * Read the IDT entry.
3219 */
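/* Each protected-mode IDT entry is an 8-byte gate descriptor; the IDTR limit is the offset of the last valid byte. */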
3220 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3221 {
3222 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3223 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3224 }
3225 X86DESC Idte;
3226 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3227 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3228 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3229 return rcStrict;
3230 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3231 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3232 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3233
3234 /*
3235 * Check the descriptor type, DPL and such.
3236 * ASSUMES this is done in the same order as described for call-gate calls.
3237 */
3238 if (Idte.Gate.u1DescType)
3239 {
3240 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3241 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3242 }
3243 bool fTaskGate = false;
3244 uint8_t f32BitGate = 1;
3245 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3246 switch (Idte.Gate.u4Type)
3247 {
3248 case X86_SEL_TYPE_SYS_UNDEFINED:
3249 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3250 case X86_SEL_TYPE_SYS_LDT:
3251 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3252 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3253 case X86_SEL_TYPE_SYS_UNDEFINED2:
3254 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3255 case X86_SEL_TYPE_SYS_UNDEFINED3:
3256 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3257 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3258 case X86_SEL_TYPE_SYS_UNDEFINED4:
3259 {
3260 /** @todo check what actually happens when the type is wrong...
3261 * esp. call gates. */
3262 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3263 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3264 }
3265
3266 case X86_SEL_TYPE_SYS_286_INT_GATE:
3267 f32BitGate = false;
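/* fall thru */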
3268 case X86_SEL_TYPE_SYS_386_INT_GATE:
3269 fEflToClear |= X86_EFL_IF;
3270 break;
3271
3272 case X86_SEL_TYPE_SYS_TASK_GATE:
3273 fTaskGate = true;
3274#ifndef IEM_IMPLEMENTS_TASKSWITCH
3275 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3276#endif
3277 break;
3278
3279 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3280 f32BitGate = false;
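/* fall thru */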
3281 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3282 break;
3283
3284 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3285 }
3286
3287 /* Check DPL against CPL if applicable. */
3288 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3289 {
3290 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3291 {
3292 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3293 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3294 }
3295 }
3296
3297 /* Is it there? */
3298 if (!Idte.Gate.u1Present)
3299 {
3300 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3301 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3302 }
3303
3304 /* Is it a task-gate? */
3305 if (fTaskGate)
3306 {
3307 /*
3308 * Construct the error code masks based on what caused this task switch.
3309 * See Intel Instruction reference for INT.
3310 */
3311 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3312 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3313 RTSEL SelTSS = Idte.Gate.u16Sel;
3314
3315 /*
3316 * Fetch the TSS descriptor in the GDT.
3317 */
3318 IEMSELDESC DescTSS;
3319 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3320 if (rcStrict != VINF_SUCCESS)
3321 {
3322 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3323 VBOXSTRICTRC_VAL(rcStrict)));
3324 return rcStrict;
3325 }
3326
3327 /* The TSS descriptor must be a system segment and be available (not busy). */
3328 if ( DescTSS.Legacy.Gen.u1DescType
3329 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3330 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3331 {
3332 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3333 u8Vector, SelTSS, DescTSS.Legacy.au64));
3334 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3335 }
3336
3337 /* The TSS must be present. */
3338 if (!DescTSS.Legacy.Gen.u1Present)
3339 {
3340 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3341 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3342 }
3343
3344 /* Do the actual task switch. */
3345 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3346 }
3347
3348 /* A null CS is bad. */
3349 RTSEL NewCS = Idte.Gate.u16Sel;
3350 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3351 {
3352 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3353 return iemRaiseGeneralProtectionFault0(pIemCpu);
3354 }
3355
3356 /* Fetch the descriptor for the new CS. */
3357 IEMSELDESC DescCS;
3358 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3359 if (rcStrict != VINF_SUCCESS)
3360 {
3361 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3362 return rcStrict;
3363 }
3364
3365 /* Must be a code segment. */
3366 if (!DescCS.Legacy.Gen.u1DescType)
3367 {
3368 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3369 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3370 }
3371 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3372 {
3373 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3374 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3375 }
3376
3377 /* Don't allow lowering the privilege level. */
3378 /** @todo Does the lowering of privileges apply to software interrupts
3379 * only? This has bearings on the more-privileged or
3380 * same-privilege stack behavior further down. A testcase would
3381 * be nice. */
3382 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3383 {
3384 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3385 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3386 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3387 }
3388
3389 /* Make sure the selector is present. */
3390 if (!DescCS.Legacy.Gen.u1Present)
3391 {
3392 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3393 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3394 }
3395
3396 /* Check the new EIP against the new CS limit. */
3397 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3398 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3399 ? Idte.Gate.u16OffsetLow
3400 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3401 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3402 if (uNewEip > cbLimitCS)
3403 {
3404 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3405 u8Vector, uNewEip, cbLimitCS, NewCS));
3406 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3407 }
3408
3409 /* Calc the flag image to push. */
3410 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3411 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3412 fEfl &= ~X86_EFL_RF;
3413 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3414 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3415
3416 /* From V8086 mode only go to CPL 0. */
3417 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3418 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3419 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3420 {
3421 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3422 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3423 }
3424
3425 /*
3426 * If the privilege level changes, we need to get a new stack from the TSS.
3427 * This in turns means validating the new SS and ESP...
3428 */
3429 if (uNewCpl != pIemCpu->uCpl)
3430 {
3431 RTSEL NewSS;
3432 uint32_t uNewEsp;
3433 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3434 if (rcStrict != VINF_SUCCESS)
3435 return rcStrict;
3436
3437 IEMSELDESC DescSS;
3438 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3439 if (rcStrict != VINF_SUCCESS)
3440 return rcStrict;
3441
3442 /* Check that there is sufficient space for the stack frame. */
3443 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
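/* Frame: [err,] EIP, CS, EFLAGS, ESP, SS - plus ES, DS, FS, GS when interrupting V8086 code - in 2 or 4 byte entries depending on the gate size. */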
3444 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3445 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3446 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3447
3448 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3449 {
3450 if ( uNewEsp - 1 > cbLimitSS
3451 || uNewEsp < cbStackFrame)
3452 {
3453 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3454 u8Vector, NewSS, uNewEsp, cbStackFrame));
3455 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3456 }
3457 }
3458 else
3459 {
3460 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3461 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3462 {
3463 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3464 u8Vector, NewSS, uNewEsp, cbStackFrame));
3465 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3466 }
3467 }
3468
3469 /*
3470 * Start making changes.
3471 */
3472
3473 /* Create the stack frame. */
3474 RTPTRUNION uStackFrame;
3475 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3476 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3477 if (rcStrict != VINF_SUCCESS)
3478 return rcStrict;
3479 void * const pvStackFrame = uStackFrame.pv;
3480 if (f32BitGate)
3481 {
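/* 32-bit frame layout (lowest address first): [err,] EIP, CS, EFLAGS, ESP, SS, and for V8086 also ES, DS, FS, GS. */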
3482 if (fFlags & IEM_XCPT_FLAGS_ERR)
3483 *uStackFrame.pu32++ = uErr;
3484 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3485 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3486 uStackFrame.pu32[2] = fEfl;
3487 uStackFrame.pu32[3] = pCtx->esp;
3488 uStackFrame.pu32[4] = pCtx->ss.Sel;
3489 if (fEfl & X86_EFL_VM)
3490 {
3491 uStackFrame.pu32[1] = pCtx->cs.Sel;
3492 uStackFrame.pu32[5] = pCtx->es.Sel;
3493 uStackFrame.pu32[6] = pCtx->ds.Sel;
3494 uStackFrame.pu32[7] = pCtx->fs.Sel;
3495 uStackFrame.pu32[8] = pCtx->gs.Sel;
3496 }
3497 }
3498 else
3499 {
3500 if (fFlags & IEM_XCPT_FLAGS_ERR)
3501 *uStackFrame.pu16++ = uErr;
3502 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3503 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3504 uStackFrame.pu16[2] = fEfl;
3505 uStackFrame.pu16[3] = pCtx->sp;
3506 uStackFrame.pu16[4] = pCtx->ss.Sel;
3507 if (fEfl & X86_EFL_VM)
3508 {
3509 uStackFrame.pu16[1] = pCtx->cs.Sel;
3510 uStackFrame.pu16[5] = pCtx->es.Sel;
3511 uStackFrame.pu16[6] = pCtx->ds.Sel;
3512 uStackFrame.pu16[7] = pCtx->fs.Sel;
3513 uStackFrame.pu16[8] = pCtx->gs.Sel;
3514 }
3515 }
3516 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3517 if (rcStrict != VINF_SUCCESS)
3518 return rcStrict;
3519
3520 /* Mark the selectors 'accessed' (hope this is the correct time). */
3521 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3522 * after pushing the stack frame? (Write protect the gdt + stack to
3523 * find out.) */
3524 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3525 {
3526 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3527 if (rcStrict != VINF_SUCCESS)
3528 return rcStrict;
3529 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3530 }
3531
3532 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3533 {
3534 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3535 if (rcStrict != VINF_SUCCESS)
3536 return rcStrict;
3537 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3538 }
3539
3540 /*
3541 * Start committing the register changes (joins with the DPL=CPL branch).
3542 */
3543 pCtx->ss.Sel = NewSS;
3544 pCtx->ss.ValidSel = NewSS;
3545 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3546 pCtx->ss.u32Limit = cbLimitSS;
3547 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3548 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3549 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3550 pIemCpu->uCpl = uNewCpl;
3551
3552 if (fEfl & X86_EFL_VM)
3553 {
3554 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3555 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3556 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3557 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3558 }
3559 }
3560 /*
3561 * Same privilege, no stack change and smaller stack frame.
3562 */
3563 else
3564 {
3565 uint64_t uNewRsp;
3566 RTPTRUNION uStackFrame;
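/* Same-privilege frame: [err,] EIP/IP, CS, EFLAGS in 2 or 4 byte entries depending on the gate size. */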
3567 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3568 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3569 if (rcStrict != VINF_SUCCESS)
3570 return rcStrict;
3571 void * const pvStackFrame = uStackFrame.pv;
3572
3573 if (f32BitGate)
3574 {
3575 if (fFlags & IEM_XCPT_FLAGS_ERR)
3576 *uStackFrame.pu32++ = uErr;
3577 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3578 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3579 uStackFrame.pu32[2] = fEfl;
3580 }
3581 else
3582 {
3583 if (fFlags & IEM_XCPT_FLAGS_ERR)
3584 *uStackFrame.pu16++ = uErr;
3585 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3586 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3587 uStackFrame.pu16[2] = fEfl;
3588 }
3589 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3590 if (rcStrict != VINF_SUCCESS)
3591 return rcStrict;
3592
3593 /* Mark the CS selector as 'accessed'. */
3594 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3595 {
3596 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3597 if (rcStrict != VINF_SUCCESS)
3598 return rcStrict;
3599 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3600 }
3601
3602 /*
3603 * Start committing the register changes (joins with the other branch).
3604 */
3605 pCtx->rsp = uNewRsp;
3606 }
3607
3608 /* ... register committing continues. */
3609 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3610 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3611 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3612 pCtx->cs.u32Limit = cbLimitCS;
3613 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3614 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3615
3616 pCtx->rip = uNewEip;
3617 fEfl &= ~fEflToClear;
3618 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3619
3620 if (fFlags & IEM_XCPT_FLAGS_CR2)
3621 pCtx->cr2 = uCr2;
3622
3623 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3624 iemRaiseXcptAdjustState(pCtx, u8Vector);
3625
3626 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3627}
3628
3629
3630/**
3631 * Implements exceptions and interrupts for long mode.
3632 *
3633 * @returns VBox strict status code.
3634 * @param pIemCpu The IEM per CPU instance data.
3635 * @param pCtx The CPU context.
3636 * @param cbInstr The number of bytes to offset rIP by in the return
3637 * address.
3638 * @param u8Vector The interrupt / exception vector number.
3639 * @param fFlags The flags.
3640 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3641 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3642 */
3643IEM_STATIC VBOXSTRICTRC
3644iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3645 PCPUMCTX pCtx,
3646 uint8_t cbInstr,
3647 uint8_t u8Vector,
3648 uint32_t fFlags,
3649 uint16_t uErr,
3650 uint64_t uCr2)
3651{
3652 /*
3653 * Read the IDT entry.
3654 */
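/* Long-mode IDT entries are 16-byte gate descriptors, hence the shift by 4. */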
3655 uint16_t offIdt = (uint16_t)u8Vector << 4;
3656 if (pCtx->idtr.cbIdt < offIdt + 7)
3657 {
3658 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3659 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3660 }
3661 X86DESC64 Idte;
3662 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3663 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3664 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3665 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3666 return rcStrict;
3667 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3668 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3669 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3670
3671 /*
3672 * Check the descriptor type, DPL and such.
3673 * ASSUMES this is done in the same order as described for call-gate calls.
3674 */
3675 if (Idte.Gate.u1DescType)
3676 {
3677 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3678 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3679 }
3680 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3681 switch (Idte.Gate.u4Type)
3682 {
3683 case AMD64_SEL_TYPE_SYS_INT_GATE:
3684 fEflToClear |= X86_EFL_IF;
3685 break;
3686 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3687 break;
3688
3689 default:
3690 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3691 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3692 }
3693
3694 /* Check DPL against CPL if applicable. */
3695 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3696 {
3697 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3698 {
3699 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3700 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3701 }
3702 }
3703
3704 /* Is it there? */
3705 if (!Idte.Gate.u1Present)
3706 {
3707 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3708 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3709 }
3710
3711 /* A null CS is bad. */
3712 RTSEL NewCS = Idte.Gate.u16Sel;
3713 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3714 {
3715 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3716 return iemRaiseGeneralProtectionFault0(pIemCpu);
3717 }
3718
3719 /* Fetch the descriptor for the new CS. */
3720 IEMSELDESC DescCS;
3721 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3722 if (rcStrict != VINF_SUCCESS)
3723 {
3724 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3725 return rcStrict;
3726 }
3727
3728 /* Must be a 64-bit code segment. */
3729 if (!DescCS.Long.Gen.u1DescType)
3730 {
3731 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3732 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3733 }
3734 if ( !DescCS.Long.Gen.u1Long
3735 || DescCS.Long.Gen.u1DefBig
3736 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3737 {
3738 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3739 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3740 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3741 }
3742
3743 /* Don't allow lowering the privilege level. For non-conforming CS
3744 selectors, the CS.DPL sets the privilege level the trap/interrupt
3745 handler runs at. For conforming CS selectors, the CPL remains
3746 unchanged, but the CS.DPL must be <= CPL. */
3747 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3748 * when CPU in Ring-0. Result \#GP? */
3749 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3750 {
3751 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3752 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3753 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3754 }
3755
3756
3757 /* Make sure the selector is present. */
3758 if (!DescCS.Legacy.Gen.u1Present)
3759 {
3760 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3761 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3762 }
3763
3764 /* Check that the new RIP is canonical. */
3765 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3766 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3767 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3768 if (!IEM_IS_CANONICAL(uNewRip))
3769 {
3770 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3771 return iemRaiseGeneralProtectionFault0(pIemCpu);
3772 }
3773
3774 /*
3775 * If the privilege level changes or if the IST isn't zero, we need to get
3776 * a new stack from the TSS.
3777 */
3778 uint64_t uNewRsp;
3779 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3780 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3781 if ( uNewCpl != pIemCpu->uCpl
3782 || Idte.Gate.u3IST != 0)
3783 {
3784 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787 }
3788 else
3789 uNewRsp = pCtx->rsp;
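/* In 64-bit mode the CPU aligns the new stack pointer on a 16-byte boundary before pushing the frame. */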
3790 uNewRsp &= ~(uint64_t)0xf;
3791
3792 /*
3793 * Calc the flag image to push.
3794 */
3795 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3796 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3797 fEfl &= ~X86_EFL_RF;
3798 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3799 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3800
3801 /*
3802 * Start making changes.
3803 */
3804
3805 /* Create the stack frame. */
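/* Five quadwords - RIP, CS, RFLAGS, RSP, SS - plus the error code when one is supplied. */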
3806 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3807 RTPTRUNION uStackFrame;
3808 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3809 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3810 if (rcStrict != VINF_SUCCESS)
3811 return rcStrict;
3812 void * const pvStackFrame = uStackFrame.pv;
3813
3814 if (fFlags & IEM_XCPT_FLAGS_ERR)
3815 *uStackFrame.pu64++ = uErr;
3816 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3817 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3818 uStackFrame.pu64[2] = fEfl;
3819 uStackFrame.pu64[3] = pCtx->rsp;
3820 uStackFrame.pu64[4] = pCtx->ss.Sel;
3821 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3822 if (rcStrict != VINF_SUCCESS)
3823 return rcStrict;
3824
3825 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3826 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3827 * after pushing the stack frame? (Write protect the gdt + stack to
3828 * find out.) */
3829 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3830 {
3831 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3832 if (rcStrict != VINF_SUCCESS)
3833 return rcStrict;
3834 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3835 }
3836
3837 /*
3838 * Start committing the register changes.
3839 */
3840 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3841 * hidden registers when interrupting 32-bit or 16-bit code! */
3842 if (uNewCpl != pIemCpu->uCpl)
3843 {
3844 pCtx->ss.Sel = 0 | uNewCpl;
3845 pCtx->ss.ValidSel = 0 | uNewCpl;
3846 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3847 pCtx->ss.u32Limit = UINT32_MAX;
3848 pCtx->ss.u64Base = 0;
3849 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3850 }
3851 pCtx->rsp = uNewRsp - cbStackFrame;
3852 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3853 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3854 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3855 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3856 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3857 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3858 pCtx->rip = uNewRip;
3859 pIemCpu->uCpl = uNewCpl;
3860
3861 fEfl &= ~fEflToClear;
3862 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3863
3864 if (fFlags & IEM_XCPT_FLAGS_CR2)
3865 pCtx->cr2 = uCr2;
3866
3867 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3868 iemRaiseXcptAdjustState(pCtx, u8Vector);
3869
3870 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3871}
3872
3873
3874/**
3875 * Implements exceptions and interrupts.
3876 *
3877 * All exceptions and interrupts go through this function!
3878 *
3879 * @returns VBox strict status code.
3880 * @param pIemCpu The IEM per CPU instance data.
3881 * @param cbInstr The number of bytes to offset rIP by in the return
3882 * address.
3883 * @param u8Vector The interrupt / exception vector number.
3884 * @param fFlags The flags.
3885 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3886 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3887 */
3888DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3889iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3890 uint8_t cbInstr,
3891 uint8_t u8Vector,
3892 uint32_t fFlags,
3893 uint16_t uErr,
3894 uint64_t uCr2)
3895{
3896 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3897#ifdef IN_RING0
3898 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3899 AssertRCReturn(rc, rc);
3900#endif
3901
3902 /*
3903 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3904 */
3905 if ( pCtx->eflags.Bits.u1VM
3906 && pCtx->eflags.Bits.u2IOPL != 3
3907 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3908 && (pCtx->cr0 & X86_CR0_PE) )
3909 {
3910 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3911 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3912 u8Vector = X86_XCPT_GP;
3913 uErr = 0;
3914 }
3915#ifdef DBGFTRACE_ENABLED
3916 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3917 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3918 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3919#endif
3920
3921 /*
3922 * Do recursion accounting.
3923 */
3924 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3925 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3926 if (pIemCpu->cXcptRecursions == 0)
3927 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3928 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3929 else
3930 {
3931 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3932 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3933
3934 /** @todo double and triple faults. */
3935 if (pIemCpu->cXcptRecursions >= 3)
3936 {
3937#ifdef DEBUG_bird
3938 AssertFailed();
3939#endif
3940 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3941 }
3942
3943 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3944 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3945 {
3946 ....
3947 } */
3948 }
3949 pIemCpu->cXcptRecursions++;
3950 pIemCpu->uCurXcpt = u8Vector;
3951 pIemCpu->fCurXcpt = fFlags;
3952
3953 /*
3954 * Extensive logging.
3955 */
3956#if defined(LOG_ENABLED) && defined(IN_RING3)
3957 if (LogIs3Enabled())
3958 {
3959 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3960 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3961 char szRegs[4096];
3962 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3963 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3964 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3965 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3966 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3967 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3968 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3969 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3970 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3971 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3972 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3973 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3974 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3975 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3976 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3977 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3978 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3979 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3980 " efer=%016VR{efer}\n"
3981 " pat=%016VR{pat}\n"
3982 " sf_mask=%016VR{sf_mask}\n"
3983 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3984 " lstar=%016VR{lstar}\n"
3985 " star=%016VR{star} cstar=%016VR{cstar}\n"
3986 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3987 );
3988
3989 char szInstr[256];
3990 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3991 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3992 szInstr, sizeof(szInstr), NULL);
3993 Log3(("%s%s\n", szRegs, szInstr));
3994 }
3995#endif /* LOG_ENABLED */
3996
3997 /*
3998 * Call the mode specific worker function.
3999 */
4000 VBOXSTRICTRC rcStrict;
4001 if (!(pCtx->cr0 & X86_CR0_PE))
4002 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4003 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4004 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4005 else
4006 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4007
4008 /*
4009 * Unwind.
4010 */
4011 pIemCpu->cXcptRecursions--;
4012 pIemCpu->uCurXcpt = uPrevXcpt;
4013 pIemCpu->fCurXcpt = fPrevXcpt;
4014 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4015 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4016 return rcStrict;
4017}
4018
4019
4020/** \#DE - 00. */
4021DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4022{
4023 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4024}
4025
4026
4027/** \#DB - 01.
4028 * @note This automatically clears DR7.GD. */
4029DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4030{
4031 /** @todo set/clear RF. */
4032 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4033 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4034}
4035
4036
4037/** \#UD - 06. */
4038DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4039{
4040 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4041}
4042
4043
4044/** \#NM - 07. */
4045DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4046{
4047 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4048}
4049
4050
4051/** \#TS(err) - 0a. */
4052DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4053{
4054 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4055}
4056
4057
4058/** \#TS(tr) - 0a. */
4059DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4060{
4061 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4062 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4063}
4064
4065
4066/** \#TS(0) - 0a. */
4067DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4068{
4069 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4070 0, 0);
4071}
4072
4073
4074/** \#TS(err) - 0a. */
4075DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4076{
4077 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4078 uSel & X86_SEL_MASK_OFF_RPL, 0);
4079}
4080
4081
4082/** \#NP(err) - 0b. */
4083DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4084{
4085 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4086}
4087
4088
4089/** \#NP(seg) - 0b. */
4090DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4091{
4092 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4093 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4094}
4095
4096
4097/** \#NP(sel) - 0b. */
4098DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4099{
4100 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4101 uSel & ~X86_SEL_RPL, 0);
4102}
4103
4104
4105/** \#SS(seg) - 0c. */
4106DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4107{
4108 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4109 uSel & ~X86_SEL_RPL, 0);
4110}
4111
4112
4113/** \#SS(err) - 0c. */
4114DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4115{
4116 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4117}
4118
4119
4120/** \#GP(n) - 0d. */
4121DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4122{
4123 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4124}
4125
4126
4127/** \#GP(0) - 0d. */
4128DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4129{
4130 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4131}
4132
4133
4134/** \#GP(sel) - 0d. */
4135DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4136{
4137 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4138 Sel & ~X86_SEL_RPL, 0);
4139}
4140
4141
4142/** \#GP(0) - 0d. */
4143DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4144{
4145 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4146}
4147
4148
4149/** \#GP(sel) - 0d. */
4150DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4151{
4152 NOREF(iSegReg); NOREF(fAccess);
4153 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4154 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4155}
4156
4157
4158/** \#GP(sel) - 0d. */
4159DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4160{
4161 NOREF(Sel);
4162 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4163}
4164
4165
4166/** \#GP(sel) - 0d. */
4167DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4168{
4169 NOREF(iSegReg); NOREF(fAccess);
4170 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4171}
4172
4173
4174/** \#PF(n) - 0e. */
4175DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4176{
4177 uint16_t uErr;
4178 switch (rc)
4179 {
4180 case VERR_PAGE_NOT_PRESENT:
4181 case VERR_PAGE_TABLE_NOT_PRESENT:
4182 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4183 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4184 uErr = 0;
4185 break;
4186
4187 default:
4188 AssertMsgFailed(("%Rrc\n", rc));
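/* fall thru */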
4189 case VERR_ACCESS_DENIED:
4190 uErr = X86_TRAP_PF_P;
4191 break;
4192
4193 /** @todo reserved */
4194 }
4195
4196 if (pIemCpu->uCpl == 3)
4197 uErr |= X86_TRAP_PF_US;
4198
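/* The instruction-fetch (I/D) bit is only reported when no-execute is in effect, i.e. PAE together with EFER.NXE. */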
4199 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4200 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4201 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4202 uErr |= X86_TRAP_PF_ID;
4203
4204#if 0 /* This is so much non-sense, really. Why was it done like that? */
4205 /* Note! RW access callers reporting a WRITE protection fault, will clear
4206 the READ flag before calling. So, read-modify-write accesses (RW)
4207 can safely be reported as READ faults. */
4208 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4209 uErr |= X86_TRAP_PF_RW;
4210#else
4211 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4212 {
4213 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4214 uErr |= X86_TRAP_PF_RW;
4215 }
4216#endif
4217
4218 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4219 uErr, GCPtrWhere);
4220}
4221
4222
4223/** \#MF(0) - 10. */
4224DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4225{
4226 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4227}
4228
4229
4230/** \#AC(0) - 11. */
4231DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4232{
4233 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4234}
4235
4236
4237/**
4238 * Macro for calling iemCImplRaiseDivideError().
4239 *
4240 * This enables us to add/remove arguments and force different levels of
4241 * inlining as we wish.
4242 *
4243 * @return Strict VBox status code.
4244 */
4245#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4246IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4247{
4248 NOREF(cbInstr);
4249 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4250}
4251
4252
4253/**
4254 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4255 *
4256 * This enables us to add/remove arguments and force different levels of
4257 * inlining as we wish.
4258 *
4259 * @return Strict VBox status code.
4260 */
4261#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4262IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4263{
4264 NOREF(cbInstr);
4265 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4266}
4267
4268
4269/**
4270 * Macro for calling iemCImplRaiseInvalidOpcode().
4271 *
4272 * This enables us to add/remove arguments and force different levels of
4273 * inlining as we wish.
4274 *
4275 * @return Strict VBox status code.
4276 */
4277#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4278IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4279{
4280 NOREF(cbInstr);
4281 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4282}
4283
4284
4285/** @} */
4286
4287
4288/*
4289 *
4290 * Helper routines.
4291 * Helper routines.
4292 * Helper routines.
4293 *
4294 */
4295
4296/**
4297 * Recalculates the effective operand size.
4298 *
4299 * @param pIemCpu The IEM state.
4300 */
4301IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4302{
4303 switch (pIemCpu->enmCpuMode)
4304 {
4305 case IEMMODE_16BIT:
4306 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4307 break;
4308 case IEMMODE_32BIT:
4309 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4310 break;
4311 case IEMMODE_64BIT:
4312 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4313 {
4314 case 0:
4315 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4316 break;
4317 case IEM_OP_PRF_SIZE_OP:
4318 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4319 break;
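/* REX.W takes precedence over the 0x66 operand size prefix. */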
4320 case IEM_OP_PRF_SIZE_REX_W:
4321 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4322 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4323 break;
4324 }
4325 break;
4326 default:
4327 AssertFailed();
4328 }
4329}
4330
4331
4332/**
4333 * Sets the default operand size to 64-bit and recalculates the effective
4334 * operand size.
4335 *
4336 * @param pIemCpu The IEM state.
4337 */
4338IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4339{
4340 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4341 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4342 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4343 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4344 else
4345 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4346}
4347
4348
4349/*
4350 *
4351 * Common opcode decoders.
4352 * Common opcode decoders.
4353 * Common opcode decoders.
4354 *
4355 */
4356//#include <iprt/mem.h>
4357
4358/**
4359 * Used to add extra details about a stub case.
4360 * @param pIemCpu The IEM per CPU state.
4361 */
4362IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4363{
4364#if defined(LOG_ENABLED) && defined(IN_RING3)
4365 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4366 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4367 char szRegs[4096];
4368 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4369 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4370 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4371 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4372 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4373 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4374 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4375 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4376 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4377 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4378 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4379 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4380 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4381 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4382 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4383 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4384 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4385 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4386 " efer=%016VR{efer}\n"
4387 " pat=%016VR{pat}\n"
4388 " sf_mask=%016VR{sf_mask}\n"
4389 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4390 " lstar=%016VR{lstar}\n"
4391 " star=%016VR{star} cstar=%016VR{cstar}\n"
4392 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4393 );
4394
4395 char szInstr[256];
4396 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4397 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4398 szInstr, sizeof(szInstr), NULL);
4399
4400 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4401#else
4402 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4403#endif
4404}
4405
4406/**
4407 * Complains about a stub.
4408 *
4409 * Providing two versions of this macro, one for daily use and one for use when
4410 * working on IEM.
4411 */
4412#if 0
4413# define IEMOP_BITCH_ABOUT_STUB() \
4414 do { \
4415 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4416 iemOpStubMsg2(pIemCpu); \
4417 RTAssertPanic(); \
4418 } while (0)
4419#else
4420# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4421#endif
4422
4423/** Stubs an opcode. */
4424#define FNIEMOP_STUB(a_Name) \
4425 FNIEMOP_DEF(a_Name) \
4426 { \
4427 IEMOP_BITCH_ABOUT_STUB(); \
4428 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4429 } \
4430 typedef int ignore_semicolon
4431
4432/** Stubs an opcode. */
4433#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4434 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4435 { \
4436 IEMOP_BITCH_ABOUT_STUB(); \
4437 NOREF(a_Name0); \
4438 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4439 } \
4440 typedef int ignore_semicolon
4441
4442/** Stubs an opcode which currently should raise \#UD. */
4443#define FNIEMOP_UD_STUB(a_Name) \
4444 FNIEMOP_DEF(a_Name) \
4445 { \
4446 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4447 return IEMOP_RAISE_INVALID_OPCODE(); \
4448 } \
4449 typedef int ignore_semicolon
4450
4451/** Stubs an opcode which currently should raise \#UD. */
4452#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4453 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4454 { \
4455 NOREF(a_Name0); \
4456 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4457 return IEMOP_RAISE_INVALID_OPCODE(); \
4458 } \
4459 typedef int ignore_semicolon
4460
4461
4462
4463/** @name Register Access.
4464 * @{
4465 */
4466
4467/**
4468 * Gets a reference (pointer) to the specified hidden segment register.
4469 *
4470 * @returns Hidden register reference.
4471 * @param pIemCpu The per CPU data.
4472 * @param iSegReg The segment register.
4473 */
4474IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4475{
4476 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4477 PCPUMSELREG pSReg;
4478 switch (iSegReg)
4479 {
4480 case X86_SREG_ES: pSReg = &pCtx->es; break;
4481 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4482 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4483 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4484 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4485 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4486 default:
4487 AssertFailedReturn(NULL);
4488 }
4489#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4490 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4491 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4492#else
4493 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4494#endif
4495 return pSReg;
4496}
4497
4498
4499/**
4500 * Gets a reference (pointer) to the specified segment register (the selector
4501 * value).
4502 *
4503 * @returns Pointer to the selector variable.
4504 * @param pIemCpu The per CPU data.
4505 * @param iSegReg The segment register.
4506 */
4507IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4508{
4509 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4510 switch (iSegReg)
4511 {
4512 case X86_SREG_ES: return &pCtx->es.Sel;
4513 case X86_SREG_CS: return &pCtx->cs.Sel;
4514 case X86_SREG_SS: return &pCtx->ss.Sel;
4515 case X86_SREG_DS: return &pCtx->ds.Sel;
4516 case X86_SREG_FS: return &pCtx->fs.Sel;
4517 case X86_SREG_GS: return &pCtx->gs.Sel;
4518 }
4519 AssertFailedReturn(NULL);
4520}
4521
4522
4523/**
4524 * Fetches the selector value of a segment register.
4525 *
4526 * @returns The selector value.
4527 * @param pIemCpu The per CPU data.
4528 * @param iSegReg The segment register.
4529 */
4530IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4531{
4532 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4533 switch (iSegReg)
4534 {
4535 case X86_SREG_ES: return pCtx->es.Sel;
4536 case X86_SREG_CS: return pCtx->cs.Sel;
4537 case X86_SREG_SS: return pCtx->ss.Sel;
4538 case X86_SREG_DS: return pCtx->ds.Sel;
4539 case X86_SREG_FS: return pCtx->fs.Sel;
4540 case X86_SREG_GS: return pCtx->gs.Sel;
4541 }
4542 AssertFailedReturn(0xffff);
4543}
4544
4545
4546/**
4547 * Gets a reference (pointer) to the specified general register.
4548 *
4549 * @returns Register reference.
4550 * @param pIemCpu The per CPU data.
4551 * @param iReg The general register.
4552 */
4553IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4554{
4555 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4556 switch (iReg)
4557 {
4558 case X86_GREG_xAX: return &pCtx->rax;
4559 case X86_GREG_xCX: return &pCtx->rcx;
4560 case X86_GREG_xDX: return &pCtx->rdx;
4561 case X86_GREG_xBX: return &pCtx->rbx;
4562 case X86_GREG_xSP: return &pCtx->rsp;
4563 case X86_GREG_xBP: return &pCtx->rbp;
4564 case X86_GREG_xSI: return &pCtx->rsi;
4565 case X86_GREG_xDI: return &pCtx->rdi;
4566 case X86_GREG_x8: return &pCtx->r8;
4567 case X86_GREG_x9: return &pCtx->r9;
4568 case X86_GREG_x10: return &pCtx->r10;
4569 case X86_GREG_x11: return &pCtx->r11;
4570 case X86_GREG_x12: return &pCtx->r12;
4571 case X86_GREG_x13: return &pCtx->r13;
4572 case X86_GREG_x14: return &pCtx->r14;
4573 case X86_GREG_x15: return &pCtx->r15;
4574 }
4575 AssertFailedReturn(NULL);
4576}
4577
4578
4579/**
4580 * Gets a reference (pointer) to the specified 8-bit general register.
4581 *
4582 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4583 *
4584 * @returns Register reference.
4585 * @param pIemCpu The per CPU data.
4586 * @param iReg The register.
4587 */
4588IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4589{
4590 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4591 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4592
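/* Without a REX prefix, registers 4 thru 7 encode AH, CH, DH and BH, i.e. byte 1 of rAX thru rBX. */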
4593 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4594 if (iReg >= 4)
4595 pu8Reg++;
4596 return pu8Reg;
4597}
4598
4599
4600/**
4601 * Fetches the value of an 8-bit general register.
4602 *
4603 * @returns The register value.
4604 * @param pIemCpu The per CPU data.
4605 * @param iReg The register.
4606 */
4607IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4608{
4609 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4610 return *pbSrc;
4611}
4612
4613
4614/**
4615 * Fetches the value of a 16-bit general register.
4616 *
4617 * @returns The register value.
4618 * @param pIemCpu The per CPU data.
4619 * @param iReg The register.
4620 */
4621IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4622{
4623 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4624}
4625
4626
4627/**
4628 * Fetches the value of a 32-bit general register.
4629 *
4630 * @returns The register value.
4631 * @param pIemCpu The per CPU data.
4632 * @param iReg The register.
4633 */
4634IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4635{
4636 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4637}
4638
4639
4640/**
4641 * Fetches the value of a 64-bit general register.
4642 *
4643 * @returns The register value.
4644 * @param pIemCpu The per CPU data.
4645 * @param iReg The register.
4646 */
4647IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4648{
4649 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4650}
4651
4652
4653/**
4654 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4655 *
4656 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4657 * segment limit.
4658 *
4659 * @param pIemCpu The per CPU data.
4660 * @param offNextInstr The offset of the next instruction.
4661 */
4662IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4663{
4664 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4665 switch (pIemCpu->enmEffOpSize)
4666 {
4667 case IEMMODE_16BIT:
4668 {
4669 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4670 if ( uNewIp > pCtx->cs.u32Limit
4671 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4672 return iemRaiseGeneralProtectionFault0(pIemCpu);
4673 pCtx->rip = uNewIp;
4674 break;
4675 }
4676
4677 case IEMMODE_32BIT:
4678 {
4679 Assert(pCtx->rip <= UINT32_MAX);
4680 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4681
4682 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4683 if (uNewEip > pCtx->cs.u32Limit)
4684 return iemRaiseGeneralProtectionFault0(pIemCpu);
4685 pCtx->rip = uNewEip;
4686 break;
4687 }
4688
4689 case IEMMODE_64BIT:
4690 {
4691 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4692
4693 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4694 if (!IEM_IS_CANONICAL(uNewRip))
4695 return iemRaiseGeneralProtectionFault0(pIemCpu);
4696 pCtx->rip = uNewRip;
4697 break;
4698 }
4699
4700 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4701 }
4702
4703 pCtx->eflags.Bits.u1RF = 0;
4704 return VINF_SUCCESS;
4705}
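/* Worked example (16-bit code, no prefixes): a short jump EB FC at IP=0x1000
   arrives here with offNextInstr = -4 and pIemCpu->offOpcode = 2 (the two
   bytes decoded so far), so the new IP is 0x1000 + (-4) + 2 = 0x0FFE,
   provided that stays within the CS limit. */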
4706
4707
4708/**
4709 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4710 *
4711 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4712 * segment limit.
4713 *
4714 * @returns Strict VBox status code.
4715 * @param pIemCpu The per CPU data.
4716 * @param offNextInstr The offset of the next instruction.
4717 */
4718IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4719{
4720 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4721 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4722
4723 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4724 if ( uNewIp > pCtx->cs.u32Limit
4725 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4726 return iemRaiseGeneralProtectionFault0(pIemCpu);
4727 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4728 pCtx->rip = uNewIp;
4729 pCtx->eflags.Bits.u1RF = 0;
4730
4731 return VINF_SUCCESS;
4732}
4733
4734
4735/**
4736 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4737 *
4738 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4739 * segment limit.
4740 *
4741 * @returns Strict VBox status code.
4742 * @param pIemCpu The per CPU data.
4743 * @param offNextInstr The offset of the next instruction.
4744 */
4745IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4746{
4747 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4748 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4749
4750 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4751 {
4752 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4753
4754 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4755 if (uNewEip > pCtx->cs.u32Limit)
4756 return iemRaiseGeneralProtectionFault0(pIemCpu);
4757 pCtx->rip = uNewEip;
4758 }
4759 else
4760 {
4761 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4762
4763 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4764 if (!IEM_IS_CANONICAL(uNewRip))
4765 return iemRaiseGeneralProtectionFault0(pIemCpu);
4766 pCtx->rip = uNewRip;
4767 }
4768 pCtx->eflags.Bits.u1RF = 0;
4769 return VINF_SUCCESS;
4770}
4771
4772
4773/**
4774 * Performs a near jump to the specified address.
4775 *
4776 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4777 * segment limit.
4778 *
4779 * @param pIemCpu The per CPU data.
4780 * @param uNewRip The new RIP value.
4781 */
4782IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4783{
4784 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4785 switch (pIemCpu->enmEffOpSize)
4786 {
4787 case IEMMODE_16BIT:
4788 {
4789 Assert(uNewRip <= UINT16_MAX);
4790 if ( uNewRip > pCtx->cs.u32Limit
4791 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4792 return iemRaiseGeneralProtectionFault0(pIemCpu);
4793 /** @todo Test 16-bit jump in 64-bit mode. */
4794 pCtx->rip = uNewRip;
4795 break;
4796 }
4797
4798 case IEMMODE_32BIT:
4799 {
4800 Assert(uNewRip <= UINT32_MAX);
4801 Assert(pCtx->rip <= UINT32_MAX);
4802 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4803
4804 if (uNewRip > pCtx->cs.u32Limit)
4805 return iemRaiseGeneralProtectionFault0(pIemCpu);
4806 pCtx->rip = uNewRip;
4807 break;
4808 }
4809
4810 case IEMMODE_64BIT:
4811 {
4812 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4813
4814 if (!IEM_IS_CANONICAL(uNewRip))
4815 return iemRaiseGeneralProtectionFault0(pIemCpu);
4816 pCtx->rip = uNewRip;
4817 break;
4818 }
4819
4820 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4821 }
4822
4823 pCtx->eflags.Bits.u1RF = 0;
4824 return VINF_SUCCESS;
4825}
4826
4827
4828/**
4829 * Gets the address of the top of the stack.
4830 *
4831 * @param pIemCpu The per CPU data.
4832 * @param pCtx The CPU context which SP/ESP/RSP should be
4833 * read.
4834 */
4835DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4836{
4837 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4838 return pCtx->rsp;
4839 if (pCtx->ss.Attr.n.u1DefBig)
4840 return pCtx->esp;
4841 return pCtx->sp;
4842}
4843
4844
4845/**
4846 * Updates the RIP/EIP/IP to point to the next instruction.
4847 *
4848 * This function leaves the EFLAGS.RF flag alone.
4849 *
4850 * @param pIemCpu The per CPU data.
4851 * @param cbInstr The number of bytes to add.
4852 */
4853IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4854{
4855 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4856 switch (pIemCpu->enmCpuMode)
4857 {
4858 case IEMMODE_16BIT:
4859 Assert(pCtx->rip <= UINT16_MAX);
4860 pCtx->eip += cbInstr;
4861 pCtx->eip &= UINT32_C(0xffff);
4862 break;
4863
4864 case IEMMODE_32BIT:
4865 pCtx->eip += cbInstr;
4866 Assert(pCtx->rip <= UINT32_MAX);
4867 break;
4868
4869 case IEMMODE_64BIT:
4870 pCtx->rip += cbInstr;
4871 break;
4872 default: AssertFailed();
4873 }
4874}
4875
4876
4877#if 0
4878/**
4879 * Updates the RIP/EIP/IP to point to the next instruction.
4880 *
4881 * @param pIemCpu The per CPU data.
4882 */
4883IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4884{
4885 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4886}
4887#endif
4888
4889
4890
4891/**
4892 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4893 *
4894 * @param pIemCpu The per CPU data.
4895 * @param cbInstr The number of bytes to add.
4896 */
4897IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4898{
4899 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4900
4901 pCtx->eflags.Bits.u1RF = 0;
4902
4903 switch (pIemCpu->enmCpuMode)
4904 {
4905 case IEMMODE_16BIT:
4906 Assert(pCtx->rip <= UINT16_MAX);
4907 pCtx->eip += cbInstr;
4908 pCtx->eip &= UINT32_C(0xffff);
4909 break;
4910
4911 case IEMMODE_32BIT:
4912 pCtx->eip += cbInstr;
4913 Assert(pCtx->rip <= UINT32_MAX);
4914 break;
4915
4916 case IEMMODE_64BIT:
4917 pCtx->rip += cbInstr;
4918 break;
4919 default: AssertFailed();
4920 }
4921}
4922
4923
4924/**
4925 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4926 *
4927 * @param pIemCpu The per CPU data.
4928 */
4929IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4930{
4931 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4932}
4933
4934
4935/**
4936 * Adds to the stack pointer.
4937 *
4938 * @param pIemCpu The per CPU data.
4939 * @param pCtx The CPU context which SP/ESP/RSP should be
4940 * updated.
4941 * @param cbToAdd The number of bytes to add.
4942 */
4943DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4944{
4945 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4946 pCtx->rsp += cbToAdd;
4947 else if (pCtx->ss.Attr.n.u1DefBig)
4948 pCtx->esp += cbToAdd;
4949 else
4950 pCtx->sp += cbToAdd;
4951}
4952
4953
4954/**
4955 * Subtracts from the stack pointer.
4956 *
4957 * @param pIemCpu The per CPU data.
4958 * @param pCtx The CPU context which SP/ESP/RSP should be
4959 * updated.
4960 * @param cbToSub The number of bytes to subtract.
4961 */
4962DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4963{
4964 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4965 pCtx->rsp -= cbToSub;
4966 else if (pCtx->ss.Attr.n.u1DefBig)
4967 pCtx->esp -= cbToSub;
4968 else
4969 pCtx->sp -= cbToSub;
4970}
4971
4972
4973/**
4974 * Adds to the temporary stack pointer.
4975 *
4976 * @param pIemCpu The per CPU data.
4977 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4978 * @param cbToAdd The number of bytes to add.
4979 * @param pCtx Where to get the current stack mode.
4980 */
4981DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4982{
4983 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4984 pTmpRsp->u += cbToAdd;
4985 else if (pCtx->ss.Attr.n.u1DefBig)
4986 pTmpRsp->DWords.dw0 += cbToAdd;
4987 else
4988 pTmpRsp->Words.w0 += cbToAdd;
4989}
4990
4991
4992/**
4993 * Subtracts from the temporary stack pointer.
4994 *
4995 * @param pIemCpu The per CPU data.
4996 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4997 * @param cbToSub The number of bytes to subtract.
4998 * @param pCtx Where to get the current stack mode.
4999 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5000 * expecting that.
5001 */
5002DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5003{
5004 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5005 pTmpRsp->u -= cbToSub;
5006 else if (pCtx->ss.Attr.n.u1DefBig)
5007 pTmpRsp->DWords.dw0 -= cbToSub;
5008 else
5009 pTmpRsp->Words.w0 -= cbToSub;
5010}
5011
5012
5013/**
5014 * Calculates the effective stack address for a push of the specified size as
5015 * well as the new RSP value (upper bits may be masked).
5016 *
5017 * @returns Effective stack address for the push.
5018 * @param pIemCpu The IEM per CPU data.
5019 * @param pCtx Where to get the current stack mode.
5020 * @param cbItem The size of the stack item to push.
5021 * @param puNewRsp Where to return the new RSP value.
5022 */
5023DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5024{
5025 RTUINT64U uTmpRsp;
5026 RTGCPTR GCPtrTop;
5027 uTmpRsp.u = pCtx->rsp;
5028
5029 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5030 GCPtrTop = uTmpRsp.u -= cbItem;
5031 else if (pCtx->ss.Attr.n.u1DefBig)
5032 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5033 else
5034 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5035 *puNewRsp = uTmpRsp.u;
5036 return GCPtrTop;
5037}
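/* Example: a 64-bit push of an 8 byte item with RSP=0x7000 returns
   GCPtrTop=0x6FF8 and sets *puNewRsp=0x6FF8.  With a 16-bit stack segment
   only SP (Words.w0) is decremented, so the upper 48 bits of the returned
   RSP value keep whatever the context already held. */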
5038
5039
5040/**
5041 * Gets the current stack pointer and calculates the value after a pop of the
5042 * specified size.
5043 *
5044 * @returns Current stack pointer.
5045 * @param pIemCpu The per CPU data.
5046 * @param pCtx Where to get the current stack mode.
5047 * @param cbItem The size of the stack item to pop.
5048 * @param puNewRsp Where to return the new RSP value.
5049 */
5050DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5051{
5052 RTUINT64U uTmpRsp;
5053 RTGCPTR GCPtrTop;
5054 uTmpRsp.u = pCtx->rsp;
5055
5056 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5057 {
5058 GCPtrTop = uTmpRsp.u;
5059 uTmpRsp.u += cbItem;
5060 }
5061 else if (pCtx->ss.Attr.n.u1DefBig)
5062 {
5063 GCPtrTop = uTmpRsp.DWords.dw0;
5064 uTmpRsp.DWords.dw0 += cbItem;
5065 }
5066 else
5067 {
5068 GCPtrTop = uTmpRsp.Words.w0;
5069 uTmpRsp.Words.w0 += cbItem;
5070 }
5071 *puNewRsp = uTmpRsp.u;
5072 return GCPtrTop;
5073}
5074
5075
5076/**
5077 * Calculates the effective stack address for a push of the specified size as
5078 * well as the new temporary RSP value (upper bits may be masked).
5079 *
5080 * @returns Effective stack address for the push.
5081 * @param pIemCpu The per CPU data.
5082 * @param pCtx Where to get the current stack mode.
5083 * @param pTmpRsp The temporary stack pointer. This is updated.
5084 * @param cbItem The size of the stack item to push.
5085 */
5086DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5087{
5088 RTGCPTR GCPtrTop;
5089
5090 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5091 GCPtrTop = pTmpRsp->u -= cbItem;
5092 else if (pCtx->ss.Attr.n.u1DefBig)
5093 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5094 else
5095 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5096 return GCPtrTop;
5097}
5098
5099
5100/**
5101 * Gets the effective stack address for a pop of the specified size and
5102 * calculates and updates the temporary RSP.
5103 *
5104 * @returns Current stack pointer.
5105 * @param pIemCpu The per CPU data.
5106 * @param pTmpRsp The temporary stack pointer. This is updated.
5107 * @param pCtx Where to get the current stack mode.
5108 * @param cbItem The size of the stack item to pop.
5109 */
5110DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5111{
5112 RTGCPTR GCPtrTop;
5113 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5114 {
5115 GCPtrTop = pTmpRsp->u;
5116 pTmpRsp->u += cbItem;
5117 }
5118 else if (pCtx->ss.Attr.n.u1DefBig)
5119 {
5120 GCPtrTop = pTmpRsp->DWords.dw0;
5121 pTmpRsp->DWords.dw0 += cbItem;
5122 }
5123 else
5124 {
5125 GCPtrTop = pTmpRsp->Words.w0;
5126 pTmpRsp->Words.w0 += cbItem;
5127 }
5128 return GCPtrTop;
5129}
5130
5131/** @} */
5132
5133
5134/** @name FPU access and helpers.
5135 *
5136 * @{
5137 */
5138
5139
5140/**
5141 * Hook for preparing to use the host FPU.
5142 *
5143 * This is necessary in ring-0 and raw-mode context.
5144 *
5145 * @param pIemCpu The IEM per CPU data.
5146 */
5147DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5148{
5149#ifdef IN_RING3
5150 NOREF(pIemCpu);
5151#else
5152/** @todo RZ: FIXME */
5153//# error "Implement me"
5154#endif
5155}
5156
5157
5158/**
5159 * Hook for preparing to use the host FPU for SSE.
5160 *
5161 * This is necessary in ring-0 and raw-mode context.
5162 *
5163 * @param pIemCpu The IEM per CPU data.
5164 */
5165DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5166{
5167 iemFpuPrepareUsage(pIemCpu);
5168}
5169
5170
5171/**
5172 * Stores a QNaN value into a FPU register.
5173 *
5174 * @param pReg Pointer to the register.
5175 */
5176DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5177{
5178 pReg->au32[0] = UINT32_C(0x00000000);
5179 pReg->au32[1] = UINT32_C(0xc0000000);
5180 pReg->au16[4] = UINT16_C(0xffff);
5181}
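/* The value written is the 80-bit "real indefinite" QNaN: sign=1,
   exponent=0x7FFF, mantissa=0xC000000000000000, i.e. the bit pattern
   FFFF:C0000000:00000000 that the FPU itself produces for masked
   invalid-operation responses. */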
5182
5183
5184/**
5185 * Updates the FOP, FPU.CS and FPUIP registers.
5186 *
5187 * @param pIemCpu The IEM per CPU data.
5188 * @param pCtx The CPU context.
5189 * @param pFpuCtx The FPU context.
5190 */
5191DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5192{
5193 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5194 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5195 /** @todo x87.CS and FPUIP need to be kept separately. */
5196 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5197 {
5198 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5199 * happens in real mode here based on the fnsave and fnstenv images. */
5200 pFpuCtx->CS = 0;
5201 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5202 }
5203 else
5204 {
5205 pFpuCtx->CS = pCtx->cs.Sel;
5206 pFpuCtx->FPUIP = pCtx->rip;
5207 }
5208}
5209
5210
5211/**
5212 * Updates the x87.DS and FPUDP registers.
5213 *
5214 * @param pIemCpu The IEM per CPU data.
5215 * @param pCtx The CPU context.
5216 * @param pFpuCtx The FPU context.
5217 * @param iEffSeg The effective segment register.
5218 * @param GCPtrEff The effective address relative to @a iEffSeg.
5219 */
5220DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5221{
5222 RTSEL sel;
5223 switch (iEffSeg)
5224 {
5225 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5226 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5227 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5228 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5229 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5230 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5231 default:
5232 AssertMsgFailed(("%d\n", iEffSeg));
5233 sel = pCtx->ds.Sel;
5234 }
5235 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5236 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5237 {
5238 pFpuCtx->DS = 0;
5239 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5240 }
5241 else
5242 {
5243 pFpuCtx->DS = sel;
5244 pFpuCtx->FPUDP = GCPtrEff;
5245 }
5246}
5247
5248
5249/**
5250 * Rotates the stack registers in the push direction.
5251 *
5252 * @param pFpuCtx The FPU context.
5253 * @remarks This is a complete waste of time, but fxsave stores the registers in
5254 * stack order.
5255 */
5256DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5257{
5258 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5259 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5260 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5261 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5262 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5263 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5264 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5265 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5266 pFpuCtx->aRegs[0].r80 = r80Tmp;
5267}
5268
5269
5270/**
5271 * Rotates the stack registers in the pop direction.
5272 *
5273 * @param pFpuCtx The FPU context.
5274 * @remarks This is a complete waste of time, but fxsave stores the registers in
5275 * stack order.
5276 */
5277DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5278{
5279 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5280 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5281 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5282 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5283 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5284 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5285 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5286 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5287 pFpuCtx->aRegs[7].r80 = r80Tmp;
5288}
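/* The aRegs[] array is kept in ST() order (aRegs[0] is always ST(0)), which
   matches the FXSAVE image layout.  Changing TOP therefore means physically
   rotating the eight registers: a push turns each old ST(i) into ST(i+1),
   with the freshly written aRegs[7] ending up as the new ST(0); a pop is the
   inverse rotation. */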
5289
5290
5291/**
5292 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5293 * exception prevents it.
5294 *
5295 * @param pIemCpu The IEM per CPU data.
5296 * @param pResult The FPU operation result to push.
5297 * @param pFpuCtx The FPU context.
5298 */
5299IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5300{
5301 /* Update FSW and bail if there are pending exceptions afterwards. */
5302 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5303 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5304 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5305 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5306 {
5307 pFpuCtx->FSW = fFsw;
5308 return;
5309 }
5310
5311 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5312 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5313 {
5314 /* All is fine, push the actual value. */
5315 pFpuCtx->FTW |= RT_BIT(iNewTop);
5316 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5317 }
5318 else if (pFpuCtx->FCW & X86_FCW_IM)
5319 {
5320 /* Masked stack overflow, push QNaN. */
5321 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5322 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5323 }
5324 else
5325 {
5326 /* Raise stack overflow, don't push anything. */
5327 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5328 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5329 return;
5330 }
5331
5332 fFsw &= ~X86_FSW_TOP_MASK;
5333 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5334 pFpuCtx->FSW = fFsw;
5335
5336 iemFpuRotateStackPush(pFpuCtx);
5337}
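/* When a stack fault is reported (X86_FSW_SF set), C1 encodes the direction:
   1 = overflow (push into an occupied slot), 0 = underflow.  Both overflow
   branches above therefore set X86_FSW_C1, while the underflow helpers
   further down leave it clear. */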
5338
5339
5340/**
5341 * Stores a result in a FPU register and updates the FSW and FTW.
5342 *
5343 * @param pFpuCtx The FPU context.
5344 * @param pResult The result to store.
5345 * @param iStReg Which FPU register to store it in.
5346 */
5347IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5348{
5349 Assert(iStReg < 8);
5350 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5351 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5352 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5353 pFpuCtx->FTW |= RT_BIT(iReg);
5354 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5355}
5356
5357
5358/**
5359 * Only updates the FPU status word (FSW) with the result of the current
5360 * instruction.
5361 *
5362 * @param pFpuCtx The FPU context.
5363 * @param u16FSW The FSW output of the current instruction.
5364 */
5365IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5366{
5367 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5368 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5369}
5370
5371
5372/**
5373 * Pops one item off the FPU stack if no pending exception prevents it.
5374 *
5375 * @param pFpuCtx The FPU context.
5376 */
5377IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5378{
5379 /* Check pending exceptions. */
5380 uint16_t uFSW = pFpuCtx->FSW;
5381 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5382 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5383 return;
5384
5385 /* TOP++ (a pop raises TOP; adding 9 is +1 modulo 8). */
5386 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5387 uFSW &= ~X86_FSW_TOP_MASK;
5388 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5389 pFpuCtx->FSW = uFSW;
5390
5391 /* Mark the previous ST0 as empty. */
5392 iOldTop >>= X86_FSW_TOP_SHIFT;
5393 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5394
5395 /* Rotate the registers. */
5396 iemFpuRotateStackPop(pFpuCtx);
5397}
5398
5399
5400/**
5401 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5402 *
5403 * @param pIemCpu The IEM per CPU data.
5404 * @param pResult The FPU operation result to push.
5405 */
5406IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5407{
5408 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5409 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5410 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5411 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5412}
5413
5414
5415/**
5416 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5417 * and sets FPUDP and FPUDS.
5418 *
5419 * @param pIemCpu The IEM per CPU data.
5420 * @param pResult The FPU operation result to push.
5421 * @param iEffSeg The effective segment register.
5422 * @param GCPtrEff The effective address relative to @a iEffSeg.
5423 */
5424IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5425{
5426 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5427 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5428 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5429 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5430 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5431}
5432
5433
5434/**
5435 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5436 * unless a pending exception prevents it.
5437 *
5438 * @param pIemCpu The IEM per CPU data.
5439 * @param pResult The FPU operation result to store and push.
5440 */
5441IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5442{
5443 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5444 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5445 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5446
5447 /* Update FSW and bail if there are pending exceptions afterwards. */
5448 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5449 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5450 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5451 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5452 {
5453 pFpuCtx->FSW = fFsw;
5454 return;
5455 }
5456
5457 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5458 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5459 {
5460 /* All is fine, push the actual value. */
5461 pFpuCtx->FTW |= RT_BIT(iNewTop);
5462 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5463 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5464 }
5465 else if (pFpuCtx->FCW & X86_FCW_IM)
5466 {
5467 /* Masked stack overflow, push QNaN. */
5468 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5469 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5470 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5471 }
5472 else
5473 {
5474 /* Raise stack overflow, don't push anything. */
5475 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5476 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5477 return;
5478 }
5479
5480 fFsw &= ~X86_FSW_TOP_MASK;
5481 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5482 pFpuCtx->FSW = fFsw;
5483
5484 iemFpuRotateStackPush(pFpuCtx);
5485}
5486
5487
5488/**
5489 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5490 * FOP.
5491 *
5492 * @param pIemCpu The IEM per CPU data.
5493 * @param pResult The result to store.
5494 * @param iStReg Which FPU register to store it in.
5496 */
5497IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5498{
5499 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5500 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5501 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5502 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5503}
5504
5505
5506/**
5507 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5508 * FOP, and then pops the stack.
5509 *
5510 * @param pIemCpu The IEM per CPU data.
5511 * @param pResult The result to store.
5512 * @param iStReg Which FPU register to store it in.
5514 */
5515IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5516{
5517 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5518 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5519 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5520 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5521 iemFpuMaybePopOne(pFpuCtx);
5522}
5523
5524
5525/**
5526 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5527 * FPUDP, and FPUDS.
5528 *
5529 * @param pIemCpu The IEM per CPU data.
5530 * @param pResult The result to store.
5531 * @param iStReg Which FPU register to store it in.
5533 * @param iEffSeg The effective memory operand selector register.
5534 * @param GCPtrEff The effective memory operand offset.
5535 */
5536IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5537{
5538 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5539 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5540 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5541 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5542 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5543}
5544
5545
5546/**
5547 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5548 * FPUDP, and FPUDS, and then pops the stack.
5549 *
5550 * @param pIemCpu The IEM per CPU data.
5551 * @param pResult The result to store.
5552 * @param iStReg Which FPU register to store it in.
5554 * @param iEffSeg The effective memory operand selector register.
5555 * @param GCPtrEff The effective memory operand offset.
5556 */
5557IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5558 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5559{
5560 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5561 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5562 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5563 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5564 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5565 iemFpuMaybePopOne(pFpuCtx);
5566}
5567
5568
5569/**
5570 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5571 *
5572 * @param pIemCpu The IEM per CPU data.
5573 */
5574IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5575{
5576 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5577 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5578 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5579}
5580
5581
5582/**
5583 * Marks the specified stack register as free (for FFREE).
5584 *
5585 * @param pIemCpu The IEM per CPU data.
5586 * @param iStReg The register to free.
5587 */
5588IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5589{
5590 Assert(iStReg < 8);
5591 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5592 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5593 pFpuCtx->FTW &= ~RT_BIT(iReg);
5594}
5595
5596
5597/**
5598 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5599 *
5600 * @param pIemCpu The IEM per CPU data.
5601 */
5602IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5603{
5604 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5605 uint16_t uFsw = pFpuCtx->FSW;
5606 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5607 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5608 uFsw &= ~X86_FSW_TOP_MASK;
5609 uFsw |= uTop;
5610 pFpuCtx->FSW = uFsw;
5611}
5612
5613
5614/**
5615 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5616 *
5617 * @param pIemCpu The IEM per CPU data.
5618 */
5619IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5620{
5621 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5622 uint16_t uFsw = pFpuCtx->FSW;
5623 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5624 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5625 uFsw &= ~X86_FSW_TOP_MASK;
5626 uFsw |= uTop;
5627 pFpuCtx->FSW = uFsw;
5628}
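/* FSW.TOP is a 3-bit field, so the +7 used by iemFpuStackDecTop is simply
   -1 modulo 8; the +9 used when popping (iemFpuMaybePopOne) is likewise
   +1 modulo 8. */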
5629
5630
5631/**
5632 * Updates the FSW, FOP, FPUIP, and FPUCS.
5633 *
5634 * @param pIemCpu The IEM per CPU data.
5635 * @param u16FSW The FSW from the current instruction.
5636 */
5637IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5638{
5639 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5640 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5641 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5642 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5643}
5644
5645
5646/**
5647 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5648 *
5649 * @param pIemCpu The IEM per CPU data.
5650 * @param u16FSW The FSW from the current instruction.
5651 */
5652IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5653{
5654 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5655 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5656 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5657 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5658 iemFpuMaybePopOne(pFpuCtx);
5659}
5660
5661
5662/**
5663 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5664 *
5665 * @param pIemCpu The IEM per CPU data.
5666 * @param u16FSW The FSW from the current instruction.
5667 * @param iEffSeg The effective memory operand selector register.
5668 * @param GCPtrEff The effective memory operand offset.
5669 */
5670IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5671{
5672 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5673 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5674 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5675 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5676 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5677}
5678
5679
5680/**
5681 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5682 *
5683 * @param pIemCpu The IEM per CPU data.
5684 * @param u16FSW The FSW from the current instruction.
5685 */
5686IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5687{
5688 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5689 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5690 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5691 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5692 iemFpuMaybePopOne(pFpuCtx);
5693 iemFpuMaybePopOne(pFpuCtx);
5694}
5695
5696
5697/**
5698 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5699 *
5700 * @param pIemCpu The IEM per CPU data.
5701 * @param u16FSW The FSW from the current instruction.
5702 * @param iEffSeg The effective memory operand selector register.
5703 * @param GCPtrEff The effective memory operand offset.
5704 */
5705IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5706{
5707 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5708 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5709 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5710 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5711 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5712 iemFpuMaybePopOne(pFpuCtx);
5713}
5714
5715
5716/**
5717 * Worker routine for raising an FPU stack underflow exception.
5718 *
5719 * @param pIemCpu The IEM per CPU data.
5720 * @param pFpuCtx The FPU context.
5721 * @param iStReg The stack register being accessed.
5722 */
5723IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5724{
5725 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5726 if (pFpuCtx->FCW & X86_FCW_IM)
5727 {
5728 /* Masked underflow. */
5729 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5730 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5731 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5732 if (iStReg != UINT8_MAX)
5733 {
5734 pFpuCtx->FTW |= RT_BIT(iReg);
5735 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5736 }
5737 }
5738 else
5739 {
5740 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5741 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5742 }
5743}
5744
5745
5746/**
5747 * Raises a FPU stack underflow exception.
5748 *
5749 * @param pIemCpu The IEM per CPU data.
5750 * @param iStReg The destination register that should be loaded
5751 * with QNaN if \#IS is not masked. Specify
5752 * UINT8_MAX if none (like for fcom).
5753 */
5754DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5755{
5756 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5757 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5758 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5759 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5760}
5761
5762
5763DECL_NO_INLINE(IEM_STATIC, void)
5764iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5765{
5766 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5767 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5768 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5769 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5770 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5771}
5772
5773
5774DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5775{
5776 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5777 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5778 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5779 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5780 iemFpuMaybePopOne(pFpuCtx);
5781}
5782
5783
5784DECL_NO_INLINE(IEM_STATIC, void)
5785iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5786{
5787 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5788 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5789 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5790 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5791 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5792 iemFpuMaybePopOne(pFpuCtx);
5793}
5794
5795
5796DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5797{
5798 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5799 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5800 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5801 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5802 iemFpuMaybePopOne(pFpuCtx);
5803 iemFpuMaybePopOne(pFpuCtx);
5804}
5805
5806
5807DECL_NO_INLINE(IEM_STATIC, void)
5808iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5809{
5810 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5811 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5812 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5813
5814 if (pFpuCtx->FCW & X86_FCW_IM)
5815 {
5816 /* Masked stack underflow - push QNaN. */
5817 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5818 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5819 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5820 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5821 pFpuCtx->FTW |= RT_BIT(iNewTop);
5822 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5823 iemFpuRotateStackPush(pFpuCtx);
5824 }
5825 else
5826 {
5827 /* Exception pending - don't change TOP or the register stack. */
5828 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5829 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5830 }
5831}
5832
5833
5834DECL_NO_INLINE(IEM_STATIC, void)
5835iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5836{
5837 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5838 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5839 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5840
5841 if (pFpuCtx->FCW & X86_FCW_IM)
5842 {
5843 /* Masked stack underflow - push QNaN. */
5844 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5845 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5846 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5847 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5848 pFpuCtx->FTW |= RT_BIT(iNewTop);
5849 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5850 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5851 iemFpuRotateStackPush(pFpuCtx);
5852 }
5853 else
5854 {
5855 /* Exception pending - don't change TOP or the register stack. */
5856 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5857 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5858 }
5859}
5860
5861
5862/**
5863 * Worker routine for raising an FPU stack overflow exception on a push.
5864 *
5865 * @param pFpuCtx The FPU context.
5866 */
5867IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5868{
5869 if (pFpuCtx->FCW & X86_FCW_IM)
5870 {
5871 /* Masked overflow. */
5872 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5873 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5874 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5875 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5876 pFpuCtx->FTW |= RT_BIT(iNewTop);
5877 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5878 iemFpuRotateStackPush(pFpuCtx);
5879 }
5880 else
5881 {
5882 /* Exception pending - don't change TOP or the register stack. */
5883 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5884 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5885 }
5886}
5887
5888
5889/**
5890 * Raises a FPU stack overflow exception on a push.
5891 *
5892 * @param pIemCpu The IEM per CPU data.
5893 */
5894DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5895{
5896 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5897 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5898 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5899 iemFpuStackPushOverflowOnly(pFpuCtx);
5900}
5901
5902
5903/**
5904 * Raises a FPU stack overflow exception on a push with a memory operand.
5905 *
5906 * @param pIemCpu The IEM per CPU data.
5907 * @param iEffSeg The effective memory operand selector register.
5908 * @param GCPtrEff The effective memory operand offset.
5909 */
5910DECL_NO_INLINE(IEM_STATIC, void)
5911iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5912{
5913 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5914 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5915 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5916 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5917 iemFpuStackPushOverflowOnly(pFpuCtx);
5918}
5919
5920
5921IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5922{
5923 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5924 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5925 if (pFpuCtx->FTW & RT_BIT(iReg))
5926 return VINF_SUCCESS;
5927 return VERR_NOT_FOUND;
5928}
5929
5930
5931IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5932{
5933 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5934 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5935 if (pFpuCtx->FTW & RT_BIT(iReg))
5936 {
5937 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5938 return VINF_SUCCESS;
5939 }
5940 return VERR_NOT_FOUND;
5941}
5942
5943
5944IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5945 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5946{
5947 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5948 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5949 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5950 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5951 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5952 {
5953 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5954 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
5955 return VINF_SUCCESS;
5956 }
5957 return VERR_NOT_FOUND;
5958}
5959
5960
5961IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5962{
5963 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5964 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5965 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5966 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5967 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5968 {
5969 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5970 return VINF_SUCCESS;
5971 }
5972 return VERR_NOT_FOUND;
5973}
5974
5975
5976/**
5977 * Updates the FPU exception status after FCW is changed.
5978 *
5979 * @param pFpuCtx The FPU context.
5980 */
5981IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
5982{
5983 uint16_t u16Fsw = pFpuCtx->FSW;
5984 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
5985 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5986 else
5987 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5988 pFpuCtx->FSW = u16Fsw;
5989}
5990
5991
5992/**
5993 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
5994 *
5995 * @returns The full FTW.
5996 * @param pFpuCtx The FPU context.
5997 */
5998IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
5999{
6000 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6001 uint16_t u16Ftw = 0;
6002 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6003 for (unsigned iSt = 0; iSt < 8; iSt++)
6004 {
6005 unsigned const iReg = (iSt + iTop) & 7;
6006 if (!(u8Ftw & RT_BIT(iReg)))
6007 u16Ftw |= 3 << (iReg * 2); /* empty */
6008 else
6009 {
6010 uint16_t uTag;
6011 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6012 if (pr80Reg->s.uExponent == 0x7fff)
6013 uTag = 2; /* Exponent is all 1's => Special. */
6014 else if (pr80Reg->s.uExponent == 0x0000)
6015 {
6016 if (pr80Reg->s.u64Mantissa == 0x0000)
6017 uTag = 1; /* All bits are zero => Zero. */
6018 else
6019 uTag = 2; /* Must be special. */
6020 }
6021 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6022 uTag = 0; /* Valid. */
6023 else
6024 uTag = 2; /* Must be special. */
6025
6026 u16Ftw |= uTag << (iReg * 2);
6027 }
6028 }
6029
6030 return u16Ftw;
6031}
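/* The two-bit tags produced above follow the architectural encoding:
       00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal or
       unsupported), 11 = empty.
   The FXSAVE image only keeps a one-bit empty/not-empty summary per register,
   so FNSTENV/FNSAVE have to reconstruct the full tag word like this. */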
6032
6033
6034/**
6035 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6036 *
6037 * @returns The compressed FTW.
6038 * @param u16FullFtw The full FTW to convert.
6039 */
6040IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6041{
6042 uint8_t u8Ftw = 0;
6043 for (unsigned i = 0; i < 8; i++)
6044 {
6045 if ((u16FullFtw & 3) != 3 /*empty*/)
6046 u8Ftw |= RT_BIT(i);
6047 u16FullFtw >>= 2;
6048 }
6049
6050 return u8Ftw;
6051}
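/* Example: a full tag word of 0xFFFF (all registers empty) compresses to
   0x00, while any register whose tag is not 11b gets its bit set in the
   abridged FXSAVE-style tag byte. */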
6052
6053/** @} */
6054
6055
6056/** @name Memory access.
6057 *
6058 * @{
6059 */
6060
6061
6062/**
6063 * Updates the IEMCPU::cbWritten counter if applicable.
6064 *
6065 * @param pIemCpu The IEM per CPU data.
6066 * @param fAccess The access being accounted for.
6067 * @param cbMem The access size.
6068 */
6069DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6070{
6071 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6072 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6073 pIemCpu->cbWritten += (uint32_t)cbMem;
6074}
6075
6076
6077/**
6078 * Checks if the given segment can be written to, raising the appropriate
6079 * exception if not.
6080 *
6081 * @returns VBox strict status code.
6082 *
6083 * @param pIemCpu The IEM per CPU data.
6084 * @param pHid Pointer to the hidden register.
6085 * @param iSegReg The register number.
6086 * @param pu64BaseAddr Where to return the base address to use for the
6087 * segment. (In 64-bit code it may differ from the
6088 * base in the hidden segment.)
6089 */
6090IEM_STATIC VBOXSTRICTRC
6091iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6092{
6093 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6094 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6095 else
6096 {
6097 if (!pHid->Attr.n.u1Present)
6098 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6099
6100 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6101 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6102 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6103 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6104 *pu64BaseAddr = pHid->u64Base;
6105 }
6106 return VINF_SUCCESS;
6107}
6108
6109
6110/**
6111 * Checks if the given segment can be read from, raising the appropriate
6112 * exception if not.
6113 *
6114 * @returns VBox strict status code.
6115 *
6116 * @param pIemCpu The IEM per CPU data.
6117 * @param pHid Pointer to the hidden register.
6118 * @param iSegReg The register number.
6119 * @param pu64BaseAddr Where to return the base address to use for the
6120 * segment. (In 64-bit code it may differ from the
6121 * base in the hidden segment.)
6122 */
6123IEM_STATIC VBOXSTRICTRC
6124iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6125{
6126 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6127 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6128 else
6129 {
6130 if (!pHid->Attr.n.u1Present)
6131 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6132
6133 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6134 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6135 *pu64BaseAddr = pHid->u64Base;
6136 }
6137 return VINF_SUCCESS;
6138}
6139
6140
6141/**
6142 * Applies the segment limit, base and attributes.
6143 *
6144 * This may raise a \#GP or \#SS.
6145 *
6146 * @returns VBox strict status code.
6147 *
6148 * @param pIemCpu The IEM per CPU data.
6149 * @param fAccess The kind of access which is being performed.
6150 * @param iSegReg The index of the segment register to apply.
6151 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6152 * TSS, ++).
 * @param cbMem The size of the access.
6153 * @param pGCPtrMem Pointer to the guest memory address to apply
6154 * segmentation to. Input and output parameter.
6155 */
6156IEM_STATIC VBOXSTRICTRC
6157iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6158{
6159 if (iSegReg == UINT8_MAX)
6160 return VINF_SUCCESS;
6161
6162 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6163 switch (pIemCpu->enmCpuMode)
6164 {
6165 case IEMMODE_16BIT:
6166 case IEMMODE_32BIT:
6167 {
6168 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6169 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6170
6171 Assert(pSel->Attr.n.u1Present);
6172 Assert(pSel->Attr.n.u1DescType);
6173 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6174 {
6175 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6176 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6177 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6178
6179 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6180 {
6181 /** @todo CPL check. */
6182 }
6183
6184 /*
6185 * There are two kinds of data selectors, normal and expand down.
6186 */
6187 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6188 {
6189 if ( GCPtrFirst32 > pSel->u32Limit
6190 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6191 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6192 }
6193 else
6194 {
6195 /*
6196 * The upper boundary is defined by the B bit, not the G bit!
6197 */
6198 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6199 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6200 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6201 }
6202 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6203 }
6204 else
6205 {
6206
6207 /*
6208 * A code selector can usually be used to read through; writing is
6209 * only permitted in real and V8086 mode.
6210 */
6211 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6212 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6213 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6214 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6215 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6216
6217 if ( GCPtrFirst32 > pSel->u32Limit
6218 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6219 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6220
6221 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6222 {
6223 /** @todo CPL check. */
6224 }
6225
6226 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6227 }
6228 return VINF_SUCCESS;
6229 }
6230
6231 case IEMMODE_64BIT:
6232 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6233 *pGCPtrMem += pSel->u64Base;
6234 return VINF_SUCCESS;
6235
6236 default:
6237 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
6238 }
6239}
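/* Expand-down example: a data segment with limit 0x0FFF and the B bit set
   accepts offsets 0x1000 thru 0xFFFFFFFF only; an access whose first byte is
   at or below the limit, or whose last byte would exceed 0xFFFFFFFF (0xFFFF
   when B=0), fails the bounds check above (iemRaiseSelectorBounds). */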
6240
6241
6242/**
6243 * Translates a virtual address to a physical address and checks if we
6244 * can access the page as specified.
6245 *
6246 * @param pIemCpu The IEM per CPU data.
6247 * @param GCPtrMem The virtual address.
6248 * @param fAccess The intended access.
6249 * @param pGCPhysMem Where to return the physical address.
6250 */
6251IEM_STATIC VBOXSTRICTRC
6252iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6253{
6254 /** @todo Need a different PGM interface here. We're currently using
6255 * generic / REM interfaces. this won't cut it for R0 & RC. */
6256 RTGCPHYS GCPhys;
6257 uint64_t fFlags;
6258 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6259 if (RT_FAILURE(rc))
6260 {
6261 /** @todo Check unassigned memory in unpaged mode. */
6262 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6263 *pGCPhysMem = NIL_RTGCPHYS;
6264 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6265 }
6266
6267 /* If the page is writable, user accessible and does not have the no-exec
6268 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
6269 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6270 {
6271 /* Write to read only memory? */
6272 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6273 && !(fFlags & X86_PTE_RW)
6274 && ( pIemCpu->uCpl != 0
6275 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6276 {
6277 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6278 *pGCPhysMem = NIL_RTGCPHYS;
6279 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6280 }
6281
6282 /* Kernel memory accessed by userland? */
6283 if ( !(fFlags & X86_PTE_US)
6284 && pIemCpu->uCpl == 3
6285 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6286 {
6287 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6288 *pGCPhysMem = NIL_RTGCPHYS;
6289 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6290 }
6291
6292 /* Executing non-executable memory? */
6293 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6294 && (fFlags & X86_PTE_PAE_NX)
6295 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6296 {
6297 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6298 *pGCPhysMem = NIL_RTGCPHYS;
6299 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6300 VERR_ACCESS_DENIED);
6301 }
6302 }
6303
6304 /*
6305 * Set the dirty / access flags.
6306 * ASSUMES this is set when the address is translated rather than on commit...
6307 */
6308 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6309 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6310 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6311 {
6312 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6313 AssertRC(rc2);
6314 }
6315
6316 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6317 *pGCPhysMem = GCPhys;
6318 return VINF_SUCCESS;
6319}
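/* Points worth noting about the checks above: a CPL-0 write to a read-only
   page only faults when CR0.WP is set, and a CPL-3 access to a supervisor
   page is allowed when it is flagged as a system access (IEM_ACCESS_WHAT_SYS,
   used for things like descriptor table reads done on the guest's behalf). */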
6320
6321
6322
6323/**
6324 * Maps a physical page.
6325 *
6326 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6327 * @param pIemCpu The IEM per CPU data.
6328 * @param GCPhysMem The physical address.
6329 * @param fAccess The intended access.
6330 * @param ppvMem Where to return the mapping address.
6331 * @param pLock The PGM lock.
6332 */
6333IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6334{
6335#ifdef IEM_VERIFICATION_MODE_FULL
6336 /* Force the alternative path so we can ignore writes. */
6337 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6338 {
6339 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6340 {
6341 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6342 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6343 if (RT_FAILURE(rc2))
6344 pIemCpu->fProblematicMemory = true;
6345 }
6346 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6347 }
6348#endif
6349#ifdef IEM_LOG_MEMORY_WRITES
6350 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6351 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6352#endif
6353#ifdef IEM_VERIFICATION_MODE_MINIMAL
6354 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6355#endif
6356
6357 /** @todo This API may require some improving later. A private deal with PGM
6358 * regarding locking and unlocking needs to be struck. A couple of TLBs
6359 * living in PGM, but with publicly accessible inlined access methods
6360 * could perhaps be an even better solution. */
6361 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6362 GCPhysMem,
6363 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6364 pIemCpu->fBypassHandlers,
6365 ppvMem,
6366 pLock);
6367 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6368 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6369
6370#ifdef IEM_VERIFICATION_MODE_FULL
6371 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6372 pIemCpu->fProblematicMemory = true;
6373#endif
6374 return rc;
6375}
6376
6377
6378/**
6379 * Unmap a page previously mapped by iemMemPageMap.
6380 *
6381 * @param pIemCpu The IEM per CPU data.
6382 * @param GCPhysMem The physical address.
6383 * @param fAccess The intended access.
6384 * @param pvMem What iemMemPageMap returned.
6385 * @param pLock The PGM lock.
6386 */
6387DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6388{
6389 NOREF(pIemCpu);
6390 NOREF(GCPhysMem);
6391 NOREF(fAccess);
6392 NOREF(pvMem);
6393 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6394}
6395
6396
6397/**
6398 * Looks up a memory mapping entry.
6399 *
6400 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6401 * @param pIemCpu The IEM per CPU data.
6402 * @param pvMem The memory address.
6403 * @param fAccess The access flags to match.
6404 */
6405DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6406{
6407 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6408 if ( pIemCpu->aMemMappings[0].pv == pvMem
6409 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6410 return 0;
6411 if ( pIemCpu->aMemMappings[1].pv == pvMem
6412 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6413 return 1;
6414 if ( pIemCpu->aMemMappings[2].pv == pvMem
6415 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6416 return 2;
6417 return VERR_NOT_FOUND;
6418}
6419
6420
6421/**
6422 * Finds a free memmap entry when using iNextMapping doesn't work.
6423 *
6424 * @returns Memory mapping index, 1024 on failure.
6425 * @param pIemCpu The IEM per CPU data.
6426 */
6427IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6428{
6429 /*
6430 * The easy case.
6431 */
6432 if (pIemCpu->cActiveMappings == 0)
6433 {
6434 pIemCpu->iNextMapping = 1;
6435 return 0;
6436 }
6437
6438 /* There should be enough mappings for all instructions. */
6439 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6440
6441 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6442 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6443 return i;
6444
6445 AssertFailedReturn(1024);
6446}
6447
6448
6449/**
6450 * Commits a bounce buffer that needs writing back and unmaps it.
6451 *
6452 * @returns Strict VBox status code.
6453 * @param pIemCpu The IEM per CPU data.
6454 * @param iMemMap The index of the buffer to commit.
6455 */
6456IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6457{
6458 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6459 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6460
6461 /*
6462 * Do the writing.
6463 */
6464#ifndef IEM_VERIFICATION_MODE_MINIMAL
6465 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6466 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6467 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6468 {
6469 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6470 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6471 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6472 if (!pIemCpu->fBypassHandlers)
6473 {
6474 /*
6475 * Carefully and efficiently dealing with access handler return
6476             * codes makes this a little bloated.
6477 */
6478 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6479 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6480 pbBuf,
6481 cbFirst,
6482 PGMACCESSORIGIN_IEM);
6483 if (rcStrict == VINF_SUCCESS)
6484 {
6485 if (cbSecond)
6486 {
6487 rcStrict = PGMPhysWrite(pVM,
6488 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6489 pbBuf + cbFirst,
6490 cbSecond,
6491 PGMACCESSORIGIN_IEM);
6492 if (rcStrict == VINF_SUCCESS)
6493 { /* nothing */ }
6494 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6495 {
6496 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6497 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6498 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6499 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6500 }
6501 else
6502 {
6503 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6504 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6505 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6506 return rcStrict;
6507 }
6508 }
6509 }
6510 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6511 {
6512 if (!cbSecond)
6513 {
6514 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6515 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6516 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6517 }
6518 else
6519 {
6520 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6521 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6522 pbBuf + cbFirst,
6523 cbSecond,
6524 PGMACCESSORIGIN_IEM);
6525 if (rcStrict2 == VINF_SUCCESS)
6526 {
6527 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6528 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6529 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6530 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6531 }
6532 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6533 {
6534 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6535 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6536 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6537 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6538 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6539 }
6540 else
6541 {
6542 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6543 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6544 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6545 return rcStrict2;
6546 }
6547 }
6548 }
6549 else
6550 {
6551 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6552 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6553 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6554 return rcStrict;
6555 }
6556 }
6557 else
6558 {
6559 /*
6560 * No access handlers, much simpler.
6561 */
6562 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6563 if (RT_SUCCESS(rc))
6564 {
6565 if (cbSecond)
6566 {
6567 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6568 if (RT_SUCCESS(rc))
6569 { /* likely */ }
6570 else
6571 {
6572 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6573 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6574 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6575 return rc;
6576 }
6577 }
6578 }
6579 else
6580 {
6581 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6582 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6583 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6584 return rc;
6585 }
6586 }
6587 }
6588#endif
6589
6590#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6591 /*
6592 * Record the write(s).
6593 */
6594 if (!pIemCpu->fNoRem)
6595 {
6596 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6597 if (pEvtRec)
6598 {
6599 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6600 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6601 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6602 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6603 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6604 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6605 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6606 }
6607 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6608 {
6609 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6610 if (pEvtRec)
6611 {
6612 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6613 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6614 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6615 memcpy(pEvtRec->u.RamWrite.ab,
6616 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6617 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6618 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6619 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6620 }
6621 }
6622 }
6623#endif
6624#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6625 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6626 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6627 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6628 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6629 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6630 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6631
6632 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6633 g_cbIemWrote = cbWrote;
6634 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6635#endif
6636
6637 /*
6638 * Free the mapping entry.
6639 */
6640 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6641 Assert(pIemCpu->cActiveMappings != 0);
6642 pIemCpu->cActiveMappings--;
6643 return VINF_SUCCESS;
6644}
6645
6646
6647/**
6648 * iemMemMap worker that deals with a request crossing pages.
6649 */
6650IEM_STATIC VBOXSTRICTRC
6651iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6652{
6653 /*
6654 * Do the address translations.
6655 */
6656 RTGCPHYS GCPhysFirst;
6657 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6658 if (rcStrict != VINF_SUCCESS)
6659 return rcStrict;
6660
6661/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6662 * last byte. */
6663 RTGCPHYS GCPhysSecond;
6664 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6665 if (rcStrict != VINF_SUCCESS)
6666 return rcStrict;
6667 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6668
6669 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6670#ifdef IEM_VERIFICATION_MODE_FULL
6671 /*
6672 * Detect problematic memory when verifying so we can select
6673 * the right execution engine. (TLB: Redo this.)
6674 */
6675 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6676 {
6677 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6678 if (RT_SUCCESS(rc2))
6679 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6680 if (RT_FAILURE(rc2))
6681 pIemCpu->fProblematicMemory = true;
6682 }
6683#endif
6684
6685
6686 /*
6687 * Read in the current memory content if it's a read, execute or partial
6688 * write access.
6689 */
6690 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6691 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6692 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6693
6694 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6695 {
6696 if (!pIemCpu->fBypassHandlers)
6697 {
6698 /*
6699 * Must carefully deal with access handler status codes here,
6700             * which makes the code a bit bloated.
6701 */
6702 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6703 if (rcStrict == VINF_SUCCESS)
6704 {
6705 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6706 if (rcStrict == VINF_SUCCESS)
6707 { /*likely */ }
6708 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6709 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6710 else
6711 {
6712                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6713 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6714 return rcStrict;
6715 }
6716 }
6717 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6718 {
6719 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6720 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6721 {
6722 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6723 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6724 }
6725 else
6726 {
6727                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6728                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6729 return rcStrict2;
6730 }
6731 }
6732 else
6733 {
6734                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6735 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6736 return rcStrict;
6737 }
6738 }
6739 else
6740 {
6741 /*
6742             * No informational status codes here, much more straightforward.
6743 */
6744 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6745 if (RT_SUCCESS(rc))
6746 {
6747 Assert(rc == VINF_SUCCESS);
6748 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6749 if (RT_SUCCESS(rc))
6750 Assert(rc == VINF_SUCCESS);
6751 else
6752 {
6753                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6754 return rc;
6755 }
6756 }
6757 else
6758 {
6759                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6760 return rc;
6761 }
6762 }
6763
6764#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6765 if ( !pIemCpu->fNoRem
6766 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6767 {
6768 /*
6769 * Record the reads.
6770 */
6771 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6772 if (pEvtRec)
6773 {
6774 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6775 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6776 pEvtRec->u.RamRead.cb = cbFirstPage;
6777 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6778 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6779 }
6780 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6781 if (pEvtRec)
6782 {
6783 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6784 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6785 pEvtRec->u.RamRead.cb = cbSecondPage;
6786 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6787 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6788 }
6789 }
6790#endif
6791 }
6792#ifdef VBOX_STRICT
6793 else
6794 memset(pbBuf, 0xcc, cbMem);
6795 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6796 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6797#endif
6798
6799 /*
6800 * Commit the bounce buffer entry.
6801 */
6802 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6803 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6804 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6805 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6806 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6807 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6808 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6809 pIemCpu->iNextMapping = iMemMap + 1;
6810 pIemCpu->cActiveMappings++;
6811
6812 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6813 *ppvMem = pbBuf;
6814 return VINF_SUCCESS;
6815}
6816
6817
6818/**
6819 * iemMemMap worker that deals with iemMemPageMap failures.
6820 */
6821IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6822 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6823{
6824 /*
6825 * Filter out conditions we can handle and the ones which shouldn't happen.
6826 */
6827 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6828 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6829 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6830 {
6831 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
6832 return rcMap;
6833 }
6834 pIemCpu->cPotentialExits++;
6835
6836 /*
6837 * Read in the current memory content if it's a read, execute or partial
6838 * write access.
6839 */
6840 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6841 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6842 {
6843 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6844 memset(pbBuf, 0xff, cbMem);
6845 else
6846 {
6847 int rc;
6848 if (!pIemCpu->fBypassHandlers)
6849 {
6850 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6851 if (rcStrict == VINF_SUCCESS)
6852 { /* nothing */ }
6853 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6854 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6855 else
6856 {
6857 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6858 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6859 return rcStrict;
6860 }
6861 }
6862 else
6863 {
6864 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6865 if (RT_SUCCESS(rc))
6866 { /* likely */ }
6867 else
6868 {
6869 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6870 GCPhysFirst, rc));
6871 return rc;
6872 }
6873 }
6874 }
6875
6876#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6877 if ( !pIemCpu->fNoRem
6878 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6879 {
6880 /*
6881 * Record the read.
6882 */
6883 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6884 if (pEvtRec)
6885 {
6886 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6887 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6888 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6889 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6890 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6891 }
6892 }
6893#endif
6894 }
6895#ifdef VBOX_STRICT
6896 else
6897 memset(pbBuf, 0xcc, cbMem);
6900 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6901 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6902#endif
6903
6904 /*
6905 * Commit the bounce buffer entry.
6906 */
6907 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6908 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6909 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6910 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6911 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6912 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6913 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6914 pIemCpu->iNextMapping = iMemMap + 1;
6915 pIemCpu->cActiveMappings++;
6916
6917 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6918 *ppvMem = pbBuf;
6919 return VINF_SUCCESS;
6920}
6921
6922
6923
6924/**
6925 * Maps the specified guest memory for the given kind of access.
6926 *
6927 * This may be using bounce buffering of the memory if it's crossing a page
6928 * boundary or if there is an access handler installed for any of it. Because
6929 * of lock prefix guarantees, we're in for some extra clutter when this
6930 * happens.
6931 *
6932 * This may raise a \#GP, \#SS, \#PF or \#AC.
6933 *
6934 * @returns VBox strict status code.
6935 *
6936 * @param pIemCpu The IEM per CPU data.
6937 * @param ppvMem Where to return the pointer to the mapped
6938 * memory.
6939 * @param cbMem The number of bytes to map. This is usually 1,
6940 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6941 * string operations it can be up to a page.
6942 * @param iSegReg The index of the segment register to use for
6943 * this access. The base and limits are checked.
6944 * Use UINT8_MAX to indicate that no segmentation
6945 * is required (for IDT, GDT and LDT accesses).
6946 * @param GCPtrMem The address of the guest memory.
6947 * @param fAccess How the memory is being accessed. The
6948 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6949 * how to map the memory, while the
6950 * IEM_ACCESS_WHAT_XXX bit is used when raising
6951 * exceptions.
6952 */
6953IEM_STATIC VBOXSTRICTRC
6954iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6955{
6956 /*
6957 * Check the input and figure out which mapping entry to use.
6958 */
6959 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6960    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6961
6962 unsigned iMemMap = pIemCpu->iNextMapping;
6963 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6964 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6965 {
6966 iMemMap = iemMemMapFindFree(pIemCpu);
6967 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
6968 }
6969
6970 /*
6971 * Map the memory, checking that we can actually access it. If something
6972 * slightly complicated happens, fall back on bounce buffering.
6973 */
6974 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6975 if (rcStrict != VINF_SUCCESS)
6976 return rcStrict;
6977
6978 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6979 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6980
6981 RTGCPHYS GCPhysFirst;
6982 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6983 if (rcStrict != VINF_SUCCESS)
6984 return rcStrict;
6985
6986 void *pvMem;
6987 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6988 if (rcStrict != VINF_SUCCESS)
6989 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6990
6991 /*
6992 * Fill in the mapping table entry.
6993 */
6994 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
6995 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
6996 pIemCpu->iNextMapping = iMemMap + 1;
6997 pIemCpu->cActiveMappings++;
6998
6999 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7000 *ppvMem = pvMem;
7001 return VINF_SUCCESS;
7002}
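/*
 * Worked example of the page crossing check in iemMemMap above (illustrative
 * numbers only): with PAGE_SIZE = 0x1000, a 4 byte access at GCPtrMem = 0xffe
 * gives (0xffe & PAGE_OFFSET_MASK) + 4 = 0x1002 > 0x1000, so it straddles two
 * pages and is routed to iemMemBounceBufferMapCrossPage, while a 4 byte access
 * at 0xffc yields exactly 0x1000 and stays on the direct iemMemPageMap path.
 */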
7003
7004
7005/**
7006 * Commits the guest memory if bounce buffered and unmaps it.
7007 *
7008 * @returns Strict VBox status code.
7009 * @param pIemCpu The IEM per CPU data.
7010 * @param pvMem The mapping.
7011 * @param fAccess The kind of access.
7012 */
7013IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7014{
7015 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7016 AssertReturn(iMemMap >= 0, iMemMap);
7017
7018 /* If it's bounce buffered, we may need to write back the buffer. */
7019 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7020 {
7021 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7022 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7023 }
7024 /* Otherwise unlock it. */
7025 else
7026 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7027
7028 /* Free the entry. */
7029 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7030 Assert(pIemCpu->cActiveMappings != 0);
7031 pIemCpu->cActiveMappings--;
7032 return VINF_SUCCESS;
7033}
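/*
 * Note (illustrative): the fAccess passed to iemMemCommitAndUnmap must carry
 * the same IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX bits as the matching
 * iemMemMap call, e.g. a buffer mapped with IEM_ACCESS_DATA_W is committed
 * with IEM_ACCESS_DATA_W; otherwise iemMapLookup will not find the entry and
 * the AssertReturn above fails with VERR_NOT_FOUND.
 */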
7034
7035
7036/**
7037 * Rolls back mappings, releasing page locks and such.
7038 *
7039 * The caller shall only call this after checking cActiveMappings.
7040 *
7042 * @param pIemCpu The IEM per CPU data.
7043 */
7044IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7045{
7046 Assert(pIemCpu->cActiveMappings > 0);
7047
7048 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7049 while (iMemMap-- > 0)
7050 {
7051 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7052 if (fAccess != IEM_ACCESS_INVALID)
7053 {
7054 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7055 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7056 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7057 Assert(pIemCpu->cActiveMappings > 0);
7058 pIemCpu->cActiveMappings--;
7059 }
7060 }
7061}
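/*
 * Illustrative sketch only (guarded out): how a caller is expected to use
 * iemMemRollback after a failed instruction.  The function name
 * iemExampleExecuteOne is made up for this example.
 */
#if 0
    VBOXSTRICTRC rcStrict = iemExampleExecuteOne(pIemCpu);
    if (   rcStrict != VINF_SUCCESS
        && pIemCpu->cActiveMappings > 0)  /* only roll back when mappings were left behind */
        iemMemRollback(pIemCpu);
#endif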
7062
7063
7064/**
7065 * Fetches a data byte.
7066 *
7067 * @returns Strict VBox status code.
7068 * @param pIemCpu The IEM per CPU data.
7069 * @param pu8Dst Where to return the byte.
7070 * @param iSegReg The index of the segment register to use for
7071 * this access. The base and limits are checked.
7072 * @param GCPtrMem The address of the guest memory.
7073 */
7074IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7075{
7076 /* The lazy approach for now... */
7077 uint8_t const *pu8Src;
7078 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7079 if (rc == VINF_SUCCESS)
7080 {
7081 *pu8Dst = *pu8Src;
7082 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7083 }
7084 return rc;
7085}
7086
7087
7088/**
7089 * Fetches a data word.
7090 *
7091 * @returns Strict VBox status code.
7092 * @param pIemCpu The IEM per CPU data.
7093 * @param pu16Dst Where to return the word.
7094 * @param iSegReg The index of the segment register to use for
7095 * this access. The base and limits are checked.
7096 * @param GCPtrMem The address of the guest memory.
7097 */
7098IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7099{
7100 /* The lazy approach for now... */
7101 uint16_t const *pu16Src;
7102 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7103 if (rc == VINF_SUCCESS)
7104 {
7105 *pu16Dst = *pu16Src;
7106 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7107 }
7108 return rc;
7109}
7110
7111
7112/**
7113 * Fetches a data dword.
7114 *
7115 * @returns Strict VBox status code.
7116 * @param pIemCpu The IEM per CPU data.
7117 * @param pu32Dst Where to return the dword.
7118 * @param iSegReg The index of the segment register to use for
7119 * this access. The base and limits are checked.
7120 * @param GCPtrMem The address of the guest memory.
7121 */
7122IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7123{
7124 /* The lazy approach for now... */
7125 uint32_t const *pu32Src;
7126 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7127 if (rc == VINF_SUCCESS)
7128 {
7129 *pu32Dst = *pu32Src;
7130 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7131 }
7132 return rc;
7133}
7134
7135
7136#ifdef SOME_UNUSED_FUNCTION
7137/**
7138 * Fetches a data dword and sign extends it to a qword.
7139 *
7140 * @returns Strict VBox status code.
7141 * @param pIemCpu The IEM per CPU data.
7142 * @param pu64Dst Where to return the sign extended value.
7143 * @param iSegReg The index of the segment register to use for
7144 * this access. The base and limits are checked.
7145 * @param GCPtrMem The address of the guest memory.
7146 */
7147IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7148{
7149 /* The lazy approach for now... */
7150 int32_t const *pi32Src;
7151 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7152 if (rc == VINF_SUCCESS)
7153 {
7154 *pu64Dst = *pi32Src;
7155 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7156 }
7157#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7158 else
7159 *pu64Dst = 0;
7160#endif
7161 return rc;
7162}
7163#endif
7164
7165
7166/**
7167 * Fetches a data qword.
7168 *
7169 * @returns Strict VBox status code.
7170 * @param pIemCpu The IEM per CPU data.
7171 * @param pu64Dst Where to return the qword.
7172 * @param iSegReg The index of the segment register to use for
7173 * this access. The base and limits are checked.
7174 * @param GCPtrMem The address of the guest memory.
7175 */
7176IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7177{
7178 /* The lazy approach for now... */
7179 uint64_t const *pu64Src;
7180 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7181 if (rc == VINF_SUCCESS)
7182 {
7183 *pu64Dst = *pu64Src;
7184 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7185 }
7186 return rc;
7187}
7188
7189
7190/**
7191 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7192 *
7193 * @returns Strict VBox status code.
7194 * @param pIemCpu The IEM per CPU data.
7195 * @param pu64Dst Where to return the qword.
7196 * @param iSegReg The index of the segment register to use for
7197 * this access. The base and limits are checked.
7198 * @param GCPtrMem The address of the guest memory.
7199 */
7200IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7201{
7202 /* The lazy approach for now... */
7203 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7204 if (RT_UNLIKELY(GCPtrMem & 15))
7205 return iemRaiseGeneralProtectionFault0(pIemCpu);
7206
7207 uint64_t const *pu64Src;
7208 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7209 if (rc == VINF_SUCCESS)
7210 {
7211 *pu64Dst = *pu64Src;
7212 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7213 }
7214 return rc;
7215}
7216
7217
7218/**
7219 * Fetches a data tword.
7220 *
7221 * @returns Strict VBox status code.
7222 * @param pIemCpu The IEM per CPU data.
7223 * @param pr80Dst Where to return the tword.
7224 * @param iSegReg The index of the segment register to use for
7225 * this access. The base and limits are checked.
7226 * @param GCPtrMem The address of the guest memory.
7227 */
7228IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7229{
7230 /* The lazy approach for now... */
7231 PCRTFLOAT80U pr80Src;
7232 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7233 if (rc == VINF_SUCCESS)
7234 {
7235 *pr80Dst = *pr80Src;
7236 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7237 }
7238 return rc;
7239}
7240
7241
7242/**
7243 * Fetches a data dqword (double qword), generally SSE related.
7244 *
7245 * @returns Strict VBox status code.
7246 * @param pIemCpu The IEM per CPU data.
7247 * @param pu128Dst Where to return the dqword.
7248 * @param iSegReg The index of the segment register to use for
7249 * this access. The base and limits are checked.
7250 * @param GCPtrMem The address of the guest memory.
7251 */
7252IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7253{
7254 /* The lazy approach for now... */
7255 uint128_t const *pu128Src;
7256 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7257 if (rc == VINF_SUCCESS)
7258 {
7259 *pu128Dst = *pu128Src;
7260 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7261 }
7262 return rc;
7263}
7264
7265
7266/**
7267 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7268 * related.
7269 *
7270 * Raises \#GP(0) if not aligned.
7271 *
7272 * @returns Strict VBox status code.
7273 * @param pIemCpu The IEM per CPU data.
7274 * @param pu128Dst Where to return the dqword.
7275 * @param iSegReg The index of the segment register to use for
7276 * this access. The base and limits are checked.
7277 * @param GCPtrMem The address of the guest memory.
7278 */
7279IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7280{
7281 /* The lazy approach for now... */
7282 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7283 if ( (GCPtrMem & 15)
7284 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7285 return iemRaiseGeneralProtectionFault0(pIemCpu);
7286
7287 uint128_t const *pu128Src;
7288 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7289 if (rc == VINF_SUCCESS)
7290 {
7291 *pu128Dst = *pu128Src;
7292 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7293 }
7294 return rc;
7295}
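/*
 * Worked example of the SSE alignment check above (illustrative): GCPtrMem =
 * 0x1008 gives (0x1008 & 15) = 8, so unless the MXCSR MM bit is set the access
 * raises \#GP(0); GCPtrMem = 0x1010 gives 0 and takes the normal iemMemMap
 * path.
 */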
7296
7297
7298
7299
7300/**
7301 * Fetches a descriptor register (lgdt, lidt).
7302 *
7303 * @returns Strict VBox status code.
7304 * @param pIemCpu The IEM per CPU data.
7305 * @param pcbLimit Where to return the limit.
7306 * @param pGCPtrBase Where to return the base.
7307 * @param iSegReg The index of the segment register to use for
7308 * this access. The base and limits are checked.
7309 * @param GCPtrMem The address of the guest memory.
7310 * @param enmOpSize The effective operand size.
7311 */
7312IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7313 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7314{
7315 uint8_t const *pu8Src;
7316 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7317 (void **)&pu8Src,
7318 enmOpSize == IEMMODE_64BIT
7319 ? 2 + 8
7320 : enmOpSize == IEMMODE_32BIT
7321 ? 2 + 4
7322 : 2 + 3,
7323 iSegReg,
7324 GCPtrMem,
7325 IEM_ACCESS_DATA_R);
7326 if (rcStrict == VINF_SUCCESS)
7327 {
7328 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7329 switch (enmOpSize)
7330 {
7331 case IEMMODE_16BIT:
7332 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7333 break;
7334 case IEMMODE_32BIT:
7335 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7336 break;
7337 case IEMMODE_64BIT:
7338 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7339 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7340 break;
7341
7342 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7343 }
7344 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7345 }
7346 return rcStrict;
7347}
7348
7349
7350
7351/**
7352 * Stores a data byte.
7353 *
7354 * @returns Strict VBox status code.
7355 * @param pIemCpu The IEM per CPU data.
7356 * @param iSegReg The index of the segment register to use for
7357 * this access. The base and limits are checked.
7358 * @param GCPtrMem The address of the guest memory.
7359 * @param u8Value The value to store.
7360 */
7361IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7362{
7363 /* The lazy approach for now... */
7364 uint8_t *pu8Dst;
7365 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7366 if (rc == VINF_SUCCESS)
7367 {
7368 *pu8Dst = u8Value;
7369 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7370 }
7371 return rc;
7372}
7373
7374
7375/**
7376 * Stores a data word.
7377 *
7378 * @returns Strict VBox status code.
7379 * @param pIemCpu The IEM per CPU data.
7380 * @param iSegReg The index of the segment register to use for
7381 * this access. The base and limits are checked.
7382 * @param GCPtrMem The address of the guest memory.
7383 * @param u16Value The value to store.
7384 */
7385IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7386{
7387 /* The lazy approach for now... */
7388 uint16_t *pu16Dst;
7389 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7390 if (rc == VINF_SUCCESS)
7391 {
7392 *pu16Dst = u16Value;
7393 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7394 }
7395 return rc;
7396}
7397
7398
7399/**
7400 * Stores a data dword.
7401 *
7402 * @returns Strict VBox status code.
7403 * @param pIemCpu The IEM per CPU data.
7404 * @param iSegReg The index of the segment register to use for
7405 * this access. The base and limits are checked.
7406 * @param GCPtrMem The address of the guest memory.
7407 * @param u32Value The value to store.
7408 */
7409IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7410{
7411 /* The lazy approach for now... */
7412 uint32_t *pu32Dst;
7413 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7414 if (rc == VINF_SUCCESS)
7415 {
7416 *pu32Dst = u32Value;
7417 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7418 }
7419 return rc;
7420}
7421
7422
7423/**
7424 * Stores a data qword.
7425 *
7426 * @returns Strict VBox status code.
7427 * @param pIemCpu The IEM per CPU data.
7428 * @param iSegReg The index of the segment register to use for
7429 * this access. The base and limits are checked.
7430 * @param GCPtrMem The address of the guest memory.
7431 * @param u64Value The value to store.
7432 */
7433IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7434{
7435 /* The lazy approach for now... */
7436 uint64_t *pu64Dst;
7437 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7438 if (rc == VINF_SUCCESS)
7439 {
7440 *pu64Dst = u64Value;
7441 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7442 }
7443 return rc;
7444}
7445
7446
7447/**
7448 * Stores a data dqword.
7449 *
7450 * @returns Strict VBox status code.
7451 * @param pIemCpu The IEM per CPU data.
7452 * @param iSegReg The index of the segment register to use for
7453 * this access. The base and limits are checked.
7454 * @param GCPtrMem The address of the guest memory.
7455 * @param u128Value The value to store.
7456 */
7457IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7458{
7459 /* The lazy approach for now... */
7460 uint128_t *pu128Dst;
7461 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7462 if (rc == VINF_SUCCESS)
7463 {
7464 *pu128Dst = u128Value;
7465 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7466 }
7467 return rc;
7468}
7469
7470
7471/**
7472 * Stores a data dqword, SSE aligned.
7473 *
7474 * @returns Strict VBox status code.
7475 * @param pIemCpu The IEM per CPU data.
7476 * @param iSegReg The index of the segment register to use for
7477 * this access. The base and limits are checked.
7478 * @param GCPtrMem The address of the guest memory.
7479 * @param u128Value The value to store.
7480 */
7481IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7482{
7483 /* The lazy approach for now... */
7484 if ( (GCPtrMem & 15)
7485 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7486 return iemRaiseGeneralProtectionFault0(pIemCpu);
7487
7488 uint128_t *pu128Dst;
7489 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7490 if (rc == VINF_SUCCESS)
7491 {
7492 *pu128Dst = u128Value;
7493 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7494 }
7495 return rc;
7496}
7497
7498
7499/**
7500 * Stores a descriptor register (sgdt, sidt).
7501 *
7502 * @returns Strict VBox status code.
7503 * @param pIemCpu The IEM per CPU data.
7504 * @param cbLimit The limit.
7505 * @param GCPtrBase The base address.
7506 * @param iSegReg The index of the segment register to use for
7507 * this access. The base and limits are checked.
7508 * @param GCPtrMem The address of the guest memory.
7509 * @param enmOpSize The effective operand size.
7510 */
7511IEM_STATIC VBOXSTRICTRC
7512iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7513{
7514 uint8_t *pu8Src;
7515 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7516 (void **)&pu8Src,
7517 enmOpSize == IEMMODE_64BIT
7518 ? 2 + 8
7519 : enmOpSize == IEMMODE_32BIT
7520 ? 2 + 4
7521 : 2 + 3,
7522 iSegReg,
7523 GCPtrMem,
7524 IEM_ACCESS_DATA_W);
7525 if (rcStrict == VINF_SUCCESS)
7526 {
7527 pu8Src[0] = RT_BYTE1(cbLimit);
7528 pu8Src[1] = RT_BYTE2(cbLimit);
7529 pu8Src[2] = RT_BYTE1(GCPtrBase);
7530 pu8Src[3] = RT_BYTE2(GCPtrBase);
7531 pu8Src[4] = RT_BYTE3(GCPtrBase);
7532 if (enmOpSize == IEMMODE_16BIT)
7533 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7534 else
7535 {
7536 pu8Src[5] = RT_BYTE4(GCPtrBase);
7537 if (enmOpSize == IEMMODE_64BIT)
7538 {
7539 pu8Src[6] = RT_BYTE5(GCPtrBase);
7540 pu8Src[7] = RT_BYTE6(GCPtrBase);
7541 pu8Src[8] = RT_BYTE7(GCPtrBase);
7542 pu8Src[9] = RT_BYTE8(GCPtrBase);
7543 }
7544 }
7545 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7546 }
7547 return rcStrict;
7548}
7549
7550
7551/**
7552 * Pushes a word onto the stack.
7553 *
7554 * @returns Strict VBox status code.
7555 * @param pIemCpu The IEM per CPU data.
7556 * @param u16Value The value to push.
7557 */
7558IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7559{
7560    /* Decrement the stack pointer. */
7561 uint64_t uNewRsp;
7562 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7563 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7564
7565 /* Write the word the lazy way. */
7566 uint16_t *pu16Dst;
7567 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7568 if (rc == VINF_SUCCESS)
7569 {
7570 *pu16Dst = u16Value;
7571 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7572 }
7573
7574    /* Commit the new RSP value unless an access handler made trouble. */
7575 if (rc == VINF_SUCCESS)
7576 pCtx->rsp = uNewRsp;
7577
7578 return rc;
7579}
7580
7581
7582/**
7583 * Pushes a dword onto the stack.
7584 *
7585 * @returns Strict VBox status code.
7586 * @param pIemCpu The IEM per CPU data.
7587 * @param u32Value The value to push.
7588 */
7589IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7590{
7591    /* Decrement the stack pointer. */
7592 uint64_t uNewRsp;
7593 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7594 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7595
7596 /* Write the dword the lazy way. */
7597 uint32_t *pu32Dst;
7598 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7599 if (rc == VINF_SUCCESS)
7600 {
7601 *pu32Dst = u32Value;
7602 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7603 }
7604
7605    /* Commit the new RSP value unless an access handler made trouble. */
7606 if (rc == VINF_SUCCESS)
7607 pCtx->rsp = uNewRsp;
7608
7609 return rc;
7610}
7611
7612
7613/**
7614 * Pushes a dword segment register value onto the stack.
7615 *
7616 * @returns Strict VBox status code.
7617 * @param pIemCpu The IEM per CPU data.
7618 * @param u32Value The value to push.
7619 */
7620IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7621{
7622    /* Decrement the stack pointer. */
7623 uint64_t uNewRsp;
7624 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7625 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7626
7627 VBOXSTRICTRC rc;
7628 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7629 {
7630 /* The recompiler writes a full dword. */
7631 uint32_t *pu32Dst;
7632 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7633 if (rc == VINF_SUCCESS)
7634 {
7635 *pu32Dst = u32Value;
7636 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7637 }
7638 }
7639 else
7640 {
7641        /* The Intel docs talk about zero extending the selector register
7642           value. My actual Intel CPU here might be zero extending the value,
7643           but it still only writes the lower word... */
7644 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7645         * happens when crossing an electric page boundary, is the high word
7646 * checked for write accessibility or not? Probably it is. What about
7647 * segment limits? */
7648 uint16_t *pu16Dst;
7649 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7650 if (rc == VINF_SUCCESS)
7651 {
7652 *pu16Dst = (uint16_t)u32Value;
7653 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7654 }
7655 }
7656
7657    /* Commit the new RSP value unless an access handler made trouble. */
7658 if (rc == VINF_SUCCESS)
7659 pCtx->rsp = uNewRsp;
7660
7661 return rc;
7662}
7663
7664
7665/**
7666 * Pushes a qword onto the stack.
7667 *
7668 * @returns Strict VBox status code.
7669 * @param pIemCpu The IEM per CPU data.
7670 * @param u64Value The value to push.
7671 */
7672IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7673{
7674    /* Decrement the stack pointer. */
7675 uint64_t uNewRsp;
7676 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7677 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7678
7679    /* Write the qword the lazy way. */
7680 uint64_t *pu64Dst;
7681 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7682 if (rc == VINF_SUCCESS)
7683 {
7684 *pu64Dst = u64Value;
7685 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7686 }
7687
7688    /* Commit the new RSP value unless an access handler made trouble. */
7689 if (rc == VINF_SUCCESS)
7690 pCtx->rsp = uNewRsp;
7691
7692 return rc;
7693}
7694
7695
7696/**
7697 * Pops a word from the stack.
7698 *
7699 * @returns Strict VBox status code.
7700 * @param pIemCpu The IEM per CPU data.
7701 * @param pu16Value Where to store the popped value.
7702 */
7703IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7704{
7705 /* Increment the stack pointer. */
7706 uint64_t uNewRsp;
7707 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7708 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7709
7710    /* Read the word the lazy way. */
7711 uint16_t const *pu16Src;
7712 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7713 if (rc == VINF_SUCCESS)
7714 {
7715 *pu16Value = *pu16Src;
7716 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7717
7718 /* Commit the new RSP value. */
7719 if (rc == VINF_SUCCESS)
7720 pCtx->rsp = uNewRsp;
7721 }
7722
7723 return rc;
7724}
7725
7726
7727/**
7728 * Pops a dword from the stack.
7729 *
7730 * @returns Strict VBox status code.
7731 * @param pIemCpu The IEM per CPU data.
7732 * @param pu32Value Where to store the popped value.
7733 */
7734IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7735{
7736 /* Increment the stack pointer. */
7737 uint64_t uNewRsp;
7738 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7739 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7740
7741    /* Read the dword the lazy way. */
7742 uint32_t const *pu32Src;
7743 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7744 if (rc == VINF_SUCCESS)
7745 {
7746 *pu32Value = *pu32Src;
7747 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7748
7749 /* Commit the new RSP value. */
7750 if (rc == VINF_SUCCESS)
7751 pCtx->rsp = uNewRsp;
7752 }
7753
7754 return rc;
7755}
7756
7757
7758/**
7759 * Pops a qword from the stack.
7760 *
7761 * @returns Strict VBox status code.
7762 * @param pIemCpu The IEM per CPU data.
7763 * @param pu64Value Where to store the popped value.
7764 */
7765IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7766{
7767 /* Increment the stack pointer. */
7768 uint64_t uNewRsp;
7769 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7770 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7771
7772    /* Read the qword the lazy way. */
7773 uint64_t const *pu64Src;
7774 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7775 if (rc == VINF_SUCCESS)
7776 {
7777 *pu64Value = *pu64Src;
7778 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7779
7780 /* Commit the new RSP value. */
7781 if (rc == VINF_SUCCESS)
7782 pCtx->rsp = uNewRsp;
7783 }
7784
7785 return rc;
7786}
7787
7788
7789/**
7790 * Pushes a word onto the stack, using a temporary stack pointer.
7791 *
7792 * @returns Strict VBox status code.
7793 * @param pIemCpu The IEM per CPU data.
7794 * @param u16Value The value to push.
7795 * @param pTmpRsp Pointer to the temporary stack pointer.
7796 */
7797IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7798{
7799    /* Decrement the stack pointer. */
7800 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7801 RTUINT64U NewRsp = *pTmpRsp;
7802 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7803
7804 /* Write the word the lazy way. */
7805 uint16_t *pu16Dst;
7806 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7807 if (rc == VINF_SUCCESS)
7808 {
7809 *pu16Dst = u16Value;
7810 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7811 }
7812
7813    /* Commit the new RSP value unless an access handler made trouble. */
7814 if (rc == VINF_SUCCESS)
7815 *pTmpRsp = NewRsp;
7816
7817 return rc;
7818}
7819
7820
7821/**
7822 * Pushes a dword onto the stack, using a temporary stack pointer.
7823 *
7824 * @returns Strict VBox status code.
7825 * @param pIemCpu The IEM per CPU data.
7826 * @param u32Value The value to push.
7827 * @param pTmpRsp Pointer to the temporary stack pointer.
7828 */
7829IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7830{
7831    /* Decrement the stack pointer. */
7832 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7833 RTUINT64U NewRsp = *pTmpRsp;
7834 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7835
7836    /* Write the dword the lazy way. */
7837 uint32_t *pu32Dst;
7838 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7839 if (rc == VINF_SUCCESS)
7840 {
7841 *pu32Dst = u32Value;
7842 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7843 }
7844
7845    /* Commit the new RSP value unless an access handler made trouble. */
7846 if (rc == VINF_SUCCESS)
7847 *pTmpRsp = NewRsp;
7848
7849 return rc;
7850}
7851
7852
7853/**
7854 * Pushes a qword onto the stack, using a temporary stack pointer.
7855 *
7856 * @returns Strict VBox status code.
7857 * @param pIemCpu The IEM per CPU data.
7858 * @param u64Value The value to push.
7859 * @param pTmpRsp Pointer to the temporary stack pointer.
7860 */
7861IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7862{
7863    /* Decrement the stack pointer. */
7864 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7865 RTUINT64U NewRsp = *pTmpRsp;
7866 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7867
7868    /* Write the qword the lazy way. */
7869 uint64_t *pu64Dst;
7870 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7871 if (rc == VINF_SUCCESS)
7872 {
7873 *pu64Dst = u64Value;
7874 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7875 }
7876
7877    /* Commit the new RSP value unless an access handler made trouble. */
7878 if (rc == VINF_SUCCESS)
7879 *pTmpRsp = NewRsp;
7880
7881 return rc;
7882}
7883
7884
7885/**
7886 * Pops a word from the stack, using a temporary stack pointer.
7887 *
7888 * @returns Strict VBox status code.
7889 * @param pIemCpu The IEM per CPU data.
7890 * @param pu16Value Where to store the popped value.
7891 * @param pTmpRsp Pointer to the temporary stack pointer.
7892 */
7893IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7894{
7895 /* Increment the stack pointer. */
7896 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7897 RTUINT64U NewRsp = *pTmpRsp;
7898 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7899
7900    /* Read the word the lazy way. */
7901 uint16_t const *pu16Src;
7902 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7903 if (rc == VINF_SUCCESS)
7904 {
7905 *pu16Value = *pu16Src;
7906 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7907
7908 /* Commit the new RSP value. */
7909 if (rc == VINF_SUCCESS)
7910 *pTmpRsp = NewRsp;
7911 }
7912
7913 return rc;
7914}
7915
7916
7917/**
7918 * Pops a dword from the stack, using a temporary stack pointer.
7919 *
7920 * @returns Strict VBox status code.
7921 * @param pIemCpu The IEM per CPU data.
7922 * @param pu32Value Where to store the popped value.
7923 * @param pTmpRsp Pointer to the temporary stack pointer.
7924 */
7925IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7926{
7927 /* Increment the stack pointer. */
7928 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7929 RTUINT64U NewRsp = *pTmpRsp;
7930 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7931
7932    /* Read the dword the lazy way. */
7933 uint32_t const *pu32Src;
7934 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7935 if (rc == VINF_SUCCESS)
7936 {
7937 *pu32Value = *pu32Src;
7938 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7939
7940 /* Commit the new RSP value. */
7941 if (rc == VINF_SUCCESS)
7942 *pTmpRsp = NewRsp;
7943 }
7944
7945 return rc;
7946}
7947
7948
7949/**
7950 * Pops a qword from the stack, using a temporary stack pointer.
7951 *
7952 * @returns Strict VBox status code.
7953 * @param pIemCpu The IEM per CPU data.
7954 * @param pu64Value Where to store the popped value.
7955 * @param pTmpRsp Pointer to the temporary stack pointer.
7956 */
7957IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7958{
7959 /* Increment the stack pointer. */
7960 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7961 RTUINT64U NewRsp = *pTmpRsp;
7962 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7963
7964    /* Read the qword the lazy way. */
7965 uint64_t const *pu64Src;
7966 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7967 if (rcStrict == VINF_SUCCESS)
7968 {
7969 *pu64Value = *pu64Src;
7970 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7971
7972 /* Commit the new RSP value. */
7973 if (rcStrict == VINF_SUCCESS)
7974 *pTmpRsp = NewRsp;
7975 }
7976
7977 return rcStrict;
7978}
7979
7980
7981/**
7982 * Begin a special stack push (used by interrupts, exceptions and such).
7983 *
7984 * This will raise \#SS or \#PF if appropriate.
7985 *
7986 * @returns Strict VBox status code.
7987 * @param pIemCpu The IEM per CPU data.
7988 * @param cbMem The number of bytes to push onto the stack.
7989 * @param ppvMem Where to return the pointer to the stack memory.
7990 * As with the other memory functions this could be
7991 * direct access or bounce buffered access, so
7992 * don't commit the register until the commit call
7993 * succeeds.
7994 * @param puNewRsp Where to return the new RSP value. This must be
7995 * passed unchanged to
7996 * iemMemStackPushCommitSpecial().
7997 */
7998IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
7999{
8000 Assert(cbMem < UINT8_MAX);
8001 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8002 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8003 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8004}
8005
8006
8007/**
8008 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8009 *
8010 * This will update the rSP.
8011 *
8012 * @returns Strict VBox status code.
8013 * @param pIemCpu The IEM per CPU data.
8014 * @param pvMem The pointer returned by
8015 * iemMemStackPushBeginSpecial().
8016 * @param uNewRsp The new RSP value returned by
8017 * iemMemStackPushBeginSpecial().
8018 */
8019IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8020{
8021 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8022 if (rcStrict == VINF_SUCCESS)
8023 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8024 return rcStrict;
8025}
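/*
 * Illustrative sketch only (guarded out): the begin/commit protocol for a
 * special stack push, here pushing a single word.  u16ValueToPush is a
 * placeholder for whatever the caller wants on the stack.
 */
#if 0
    uint64_t  uNewRsp;
    uint16_t *pu16Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, sizeof(*pu16Frame), (void **)&pu16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *pu16Frame = u16ValueToPush;   /* fill in the frame; nothing is committed yet */
    rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp); /* commits the memory and RSP */
#endif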
8026
8027
8028/**
8029 * Begin a special stack pop (used by iret, retf and such).
8030 *
8031 * This will raise \#SS or \#PF if appropriate.
8032 *
8033 * @returns Strict VBox status code.
8034 * @param pIemCpu The IEM per CPU data.
8035 * @param cbMem The number of bytes to pop from the stack.
8036 * @param ppvMem Where to return the pointer to the stack memory.
8037 * @param puNewRsp Where to return the new RSP value. This must be
8038 * passed unchanged to
8039 * iemMemStackPopCommitSpecial() or applied
8040 * manually if iemMemStackPopDoneSpecial() is used.
8041 */
8042IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8043{
8044 Assert(cbMem < UINT8_MAX);
8045 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8046 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8047 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8048}
8049
8050
8051/**
8052 * Continue a special stack pop (used by iret and retf).
8053 *
8054 * This will raise \#SS or \#PF if appropriate.
8055 *
8056 * @returns Strict VBox status code.
8057 * @param pIemCpu The IEM per CPU data.
8058 * @param cbMem The number of bytes to pop off the stack.
8059 * @param ppvMem Where to return the pointer to the stack memory.
8060 * @param puNewRsp Where to return the new RSP value. This must be
8061 * passed unchanged to
8062 * iemMemStackPopCommitSpecial() or applied
8063 * manually if iemMemStackPopDoneSpecial() is used.
8064 */
8065IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8066{
8067 Assert(cbMem < UINT8_MAX);
8068 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8069 RTUINT64U NewRsp;
8070 NewRsp.u = *puNewRsp;
8071 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8072 *puNewRsp = NewRsp.u;
8073 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8074}
8075
8076
8077/**
8078 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8079 *
8080 * This will update the rSP.
8081 *
8082 * @returns Strict VBox status code.
8083 * @param pIemCpu The IEM per CPU data.
8084 * @param pvMem The pointer returned by
8085 * iemMemStackPopBeginSpecial().
8086 * @param uNewRsp The new RSP value returned by
8087 * iemMemStackPopBeginSpecial().
8088 */
8089IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8090{
8091 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8092 if (rcStrict == VINF_SUCCESS)
8093 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8094 return rcStrict;
8095}
8096
8097
8098/**
8099 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8100 * iemMemStackPopContinueSpecial).
8101 *
8102 * The caller will manually commit the rSP.
8103 *
8104 * @returns Strict VBox status code.
8105 * @param pIemCpu The IEM per CPU data.
8106 * @param pvMem The pointer returned by
8107 * iemMemStackPopBeginSpecial() or
8108 * iemMemStackPopContinueSpecial().
8109 */
8110IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8111{
8112 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8113}
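/*
 * Illustrative usage sketch (not part of the original source): how an
 * iret/retf style caller might chain the special pop helpers above.  The
 * helper name and the two popped values are made up for illustration.
 */
#if 0 /* illustration only, never compiled */
IEM_STATIC VBOXSTRICTRC iemExamplePopTwoQwords(PIEMCPU pIemCpu, uint64_t *puFirst, uint64_t *puSecond)
{
    uint64_t const *pu64Src;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, sizeof(*puFirst), (void const **)&pu64Src, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puFirst = *pu64Src;
    rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu64Src);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Continue at the new, not yet committed stack pointer. */
    rcStrict = iemMemStackPopContinueSpecial(pIemCpu, sizeof(*puSecond), (void const **)&pu64Src, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puSecond = *pu64Src;
    rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu64Src);
    if (rcStrict == VINF_SUCCESS)
        pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp; /* Manual RSP commit, as the doc comments above describe. */
    return rcStrict;
}
#endif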
8114
8115
8116/**
8117 * Fetches a system table byte.
8118 *
8119 * @returns Strict VBox status code.
8120 * @param pIemCpu The IEM per CPU data.
8121 * @param pbDst Where to return the byte.
8122 * @param iSegReg The index of the segment register to use for
8123 * this access. The base and limits are checked.
8124 * @param GCPtrMem The address of the guest memory.
8125 */
8126IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8127{
8128 /* The lazy approach for now... */
8129 uint8_t const *pbSrc;
8130 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8131 if (rc == VINF_SUCCESS)
8132 {
8133 *pbDst = *pbSrc;
8134 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8135 }
8136 return rc;
8137}
8138
8139
8140/**
8141 * Fetches a system table word.
8142 *
8143 * @returns Strict VBox status code.
8144 * @param pIemCpu The IEM per CPU data.
8145 * @param pu16Dst Where to return the word.
8146 * @param iSegReg The index of the segment register to use for
8147 * this access. The base and limits are checked.
8148 * @param GCPtrMem The address of the guest memory.
8149 */
8150IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8151{
8152 /* The lazy approach for now... */
8153 uint16_t const *pu16Src;
8154 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8155 if (rc == VINF_SUCCESS)
8156 {
8157 *pu16Dst = *pu16Src;
8158 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8159 }
8160 return rc;
8161}
8162
8163
8164/**
8165 * Fetches a system table dword.
8166 *
8167 * @returns Strict VBox status code.
8168 * @param pIemCpu The IEM per CPU data.
8169 * @param pu32Dst Where to return the dword.
8170 * @param iSegReg The index of the segment register to use for
8171 * this access. The base and limits are checked.
8172 * @param GCPtrMem The address of the guest memory.
8173 */
8174IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8175{
8176 /* The lazy approach for now... */
8177 uint32_t const *pu32Src;
8178 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8179 if (rc == VINF_SUCCESS)
8180 {
8181 *pu32Dst = *pu32Src;
8182 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8183 }
8184 return rc;
8185}
8186
8187
8188/**
8189 * Fetches a system table qword.
8190 *
8191 * @returns Strict VBox status code.
8192 * @param pIemCpu The IEM per CPU data.
8193 * @param pu64Dst Where to return the qword.
8194 * @param iSegReg The index of the segment register to use for
8195 * this access. The base and limits are checked.
8196 * @param GCPtrMem The address of the guest memory.
8197 */
8198IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8199{
8200 /* The lazy approach for now... */
8201 uint64_t const *pu64Src;
8202 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8203 if (rc == VINF_SUCCESS)
8204 {
8205 *pu64Dst = *pu64Src;
8206 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8207 }
8208 return rc;
8209}
8210
8211
8212/**
8213 * Fetches a descriptor table entry with caller specified error code.
8214 *
8215 * @returns Strict VBox status code.
8216 * @param pIemCpu The IEM per CPU.
8217 * @param pDesc Where to return the descriptor table entry.
8218 * @param uSel The selector which table entry to fetch.
8219 * @param uXcpt The exception to raise on table lookup error.
8220 * @param uErrorCode The error code associated with the exception.
8221 */
8222IEM_STATIC VBOXSTRICTRC
8223iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8224{
8225 AssertPtr(pDesc);
8226 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8227
8228 /** @todo did the 286 require all 8 bytes to be accessible? */
8229 /*
8230 * Get the selector table base and check bounds.
8231 */
8232 RTGCPTR GCPtrBase;
8233 if (uSel & X86_SEL_LDT)
8234 {
8235 if ( !pCtx->ldtr.Attr.n.u1Present
8236 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8237 {
8238 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8239 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8240 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8241 uErrorCode, 0);
8242 }
8243
8244 Assert(pCtx->ldtr.Attr.n.u1Present);
8245 GCPtrBase = pCtx->ldtr.u64Base;
8246 }
8247 else
8248 {
8249 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8250 {
8251 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8252 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8253 uErrorCode, 0);
8254 }
8255 GCPtrBase = pCtx->gdtr.pGdt;
8256 }
8257
8258 /*
8259 * Read the legacy descriptor and maybe the long mode extensions if
8260 * required.
8261 */
8262 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8263 if (rcStrict == VINF_SUCCESS)
8264 {
8265 if ( !IEM_IS_LONG_MODE(pIemCpu)
8266 || pDesc->Legacy.Gen.u1DescType)
8267 pDesc->Long.au64[1] = 0;
8268 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8269 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8270 else
8271 {
8272 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8273 /** @todo is this the right exception? */
8274 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8275 }
8276 }
8277 return rcStrict;
8278}
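/*
 * Worked example (illustrative only): selector 0x002b has TI=0 (GDT), RPL=3
 * and index 5, i.e. (0x002b & X86_SEL_MASK) = 0x0028.  The bounds check above
 * ORs in the RPL/TI bits, so it requires (0x002b | 7) = 0x002f to be no
 * greater than GDTR.cbGdt before the 8-byte entry at GDT base + 0x0028 is
 * read with iemMemFetchSysU64.
 */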
8279
8280
8281/**
8282 * Fetches a descriptor table entry.
8283 *
8284 * @returns Strict VBox status code.
8285 * @param pIemCpu The IEM per CPU.
8286 * @param pDesc Where to return the descriptor table entry.
8287 * @param uSel The selector which table entry to fetch.
8288 * @param uXcpt The exception to raise on table lookup error.
8289 */
8290IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8291{
8292 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8293}
8294
8295
8296/**
8297 * Fakes a long mode stack selector descriptor for SS = 0.
8298 *
8299 * @param pDescSs Where to return the fake stack descriptor.
8300 * @param uDpl The DPL we want.
8301 */
8302IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8303{
8304 pDescSs->Long.au64[0] = 0;
8305 pDescSs->Long.au64[1] = 0;
8306 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8307 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8308 pDescSs->Long.Gen.u2Dpl = uDpl;
8309 pDescSs->Long.Gen.u1Present = 1;
8310 pDescSs->Long.Gen.u1Long = 1;
8311}
8312
8313
8314/**
8315 * Marks the selector descriptor as accessed (only non-system descriptors).
8316 *
8317 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8318 * will therefore skip the limit checks.
8319 *
8320 * @returns Strict VBox status code.
8321 * @param pIemCpu The IEM per CPU.
8322 * @param uSel The selector.
8323 */
8324IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8325{
8326 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8327
8328 /*
8329 * Get the selector table base and calculate the entry address.
8330 */
8331 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8332 ? pCtx->ldtr.u64Base
8333 : pCtx->gdtr.pGdt;
8334 GCPtr += uSel & X86_SEL_MASK;
8335
8336 /*
8337 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8338 * ugly stuff to avoid that. This also keeps the access atomic and removes
8339 * any question about whether it is an 8-bit or a 32-bit access.
8340 */
8341 VBOXSTRICTRC rcStrict;
8342 uint32_t volatile *pu32;
8343 if ((GCPtr & 3) == 0)
8344 {
8345 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8346 GCPtr += 2 + 2;
8347 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8348 if (rcStrict != VINF_SUCCESS)
8349 return rcStrict;
8350 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8351 }
8352 else
8353 {
8354 /* The misaligned GDT/LDT case, map the whole thing. */
8355 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8356 if (rcStrict != VINF_SUCCESS)
8357 return rcStrict;
8358 switch ((uintptr_t)pu32 & 3)
8359 {
8360 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8361 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8362 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8363 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8364 }
8365 }
8366
8367 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8368}
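/*
 * Worked example (illustrative only): the accessed flag is bit 40 of the
 * 8-byte descriptor, i.e. byte 5, bit 0.  In the aligned case above the
 * mapping starts at byte 4, so the flag is bit 8 of that dword.  In the
 * misaligned case the base pointer is advanced by 3, 2 or 1 bytes to regain
 * dword alignment, and the bit index (40 - 24, 40 - 16 or 40 - 8) is adjusted
 * so the set bit still lands on byte 5, bit 0 of the descriptor.
 */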
8369
8370/** @} */
8371
8372
8373/*
8374 * Include the C/C++ implementation of the instructions.
8375 */
8376#include "IEMAllCImpl.cpp.h"
8377
8378
8379
8380/** @name "Microcode" macros.
8381 *
8382 * The idea is that we should be able to use the same code both to interpret
8383 * instructions and to recompile them. Thus this obfuscation.
8384 *
8385 * @{
8386 */
8387#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8388#define IEM_MC_END() }
8389#define IEM_MC_PAUSE() do {} while (0)
8390#define IEM_MC_CONTINUE() do {} while (0)
8391
8392/** Internal macro. */
8393#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8394 do \
8395 { \
8396 VBOXSTRICTRC rcStrict2 = a_Expr; \
8397 if (rcStrict2 != VINF_SUCCESS) \
8398 return rcStrict2; \
8399 } while (0)
8400
8401#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8402#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8403#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8404#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8405#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8406#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8407#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8408
8409#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8410#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8411 do { \
8412 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8413 return iemRaiseDeviceNotAvailable(pIemCpu); \
8414 } while (0)
8415#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8416 do { \
8417 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8418 return iemRaiseMathFault(pIemCpu); \
8419 } while (0)
8420#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8421 do { \
8422 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8423 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8424 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8425 return iemRaiseUndefinedOpcode(pIemCpu); \
8426 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8427 return iemRaiseDeviceNotAvailable(pIemCpu); \
8428 } while (0)
8429#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8430 do { \
8431 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8432 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8433 return iemRaiseUndefinedOpcode(pIemCpu); \
8434 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8435 return iemRaiseDeviceNotAvailable(pIemCpu); \
8436 } while (0)
8437#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8438 do { \
8439 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8440 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8441 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8442 return iemRaiseUndefinedOpcode(pIemCpu); \
8443 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8444 return iemRaiseDeviceNotAvailable(pIemCpu); \
8445 } while (0)
8446#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8447 do { \
8448 if (pIemCpu->uCpl != 0) \
8449 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8450 } while (0)
8451
8452
8453#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8454#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8455#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8456#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8457#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8458#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8459#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8460 uint32_t a_Name; \
8461 uint32_t *a_pName = &a_Name
8462#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8463 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8464
8465#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8466#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8467
8468#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8469#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8470#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8471#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8472#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8473#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8474#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8475#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8476#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8477#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8478#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8479#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8480#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8481#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8482#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8483#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8484#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8485#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8486#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8487#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8488#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8489#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8490#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8491#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8492#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8493#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8494#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8495#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8496#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8497/** @note Not for IOPL or IF testing or modification. */
8498#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8499#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8500#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8501#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8502
8503#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8504#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8505#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8506#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8507#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8508#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8509#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8510#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8511#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8512#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8513#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8514 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8515
8516#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8517#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8518/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8519 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8520#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8521#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8522/** @note Not for IOPL or IF testing or modification. */
8523#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8524
8525#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8526#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8527#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8528 do { \
8529 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8530 *pu32Reg += (a_u32Value); \
8531 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8532 } while (0)
8533#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8534
8535#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8536#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8537#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8538 do { \
8539 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8540 *pu32Reg -= (a_u32Value); \
8541 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8542 } while (0)
8543#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8544
8545#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8546#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8547#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8548#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8549#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8550#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8551#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8552
8553#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8554#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8555#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8556#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8557
8558#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8559#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8560#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8561
8562#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8563#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8564
8565#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8566#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8567#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8568
8569#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8570#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8571#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8572
8573#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8574
8575#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8576
8577#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8578#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8579#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8580 do { \
8581 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8582 *pu32Reg &= (a_u32Value); \
8583 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8584 } while (0)
8585#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8586
8587#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8588#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8589#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8590 do { \
8591 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8592 *pu32Reg |= (a_u32Value); \
8593 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8594 } while (0)
8595#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8596
8597
8598/** @note Not for IOPL or IF modification. */
8599#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8600/** @note Not for IOPL or IF modification. */
8601#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8602/** @note Not for IOPL or IF modification. */
8603#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8604
8605#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8606
8607
8608#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8609 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8610#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8611 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8612#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8613 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8614#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8615 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8616#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8617 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8618#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8619 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8620#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8621 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8622
8623#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8624 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8625#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8626 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8627#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8628 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8629#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8630 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8631#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8632 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8633 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8634 } while (0)
8635#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8636 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8637 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8638 } while (0)
8639#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8640 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8641#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8642 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8643#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8644 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8645
8646#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8647 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8648#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8650#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8651 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8652
8653#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8655#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8656 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8657#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8658 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8659
8660#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8662#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8664#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8666
8667#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8669
8670#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8671 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8672#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8673 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8674#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8676#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8678
8679#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8680 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8681#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8683#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8685
8686#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8688#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8690
8691
8692
8693#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8694 do { \
8695 uint8_t u8Tmp; \
8696 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8697 (a_u16Dst) = u8Tmp; \
8698 } while (0)
8699#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8700 do { \
8701 uint8_t u8Tmp; \
8702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8703 (a_u32Dst) = u8Tmp; \
8704 } while (0)
8705#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8706 do { \
8707 uint8_t u8Tmp; \
8708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8709 (a_u64Dst) = u8Tmp; \
8710 } while (0)
8711#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8712 do { \
8713 uint16_t u16Tmp; \
8714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8715 (a_u32Dst) = u16Tmp; \
8716 } while (0)
8717#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8718 do { \
8719 uint16_t u16Tmp; \
8720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8721 (a_u64Dst) = u16Tmp; \
8722 } while (0)
8723#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8724 do { \
8725 uint32_t u32Tmp; \
8726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8727 (a_u64Dst) = u32Tmp; \
8728 } while (0)
8729
8730#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8731 do { \
8732 uint8_t u8Tmp; \
8733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8734 (a_u16Dst) = (int8_t)u8Tmp; \
8735 } while (0)
8736#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8737 do { \
8738 uint8_t u8Tmp; \
8739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8740 (a_u32Dst) = (int8_t)u8Tmp; \
8741 } while (0)
8742#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8743 do { \
8744 uint8_t u8Tmp; \
8745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8746 (a_u64Dst) = (int8_t)u8Tmp; \
8747 } while (0)
8748#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8749 do { \
8750 uint16_t u16Tmp; \
8751 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8752 (a_u32Dst) = (int16_t)u16Tmp; \
8753 } while (0)
8754#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8755 do { \
8756 uint16_t u16Tmp; \
8757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8758 (a_u64Dst) = (int16_t)u16Tmp; \
8759 } while (0)
8760#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8761 do { \
8762 uint32_t u32Tmp; \
8763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8764 (a_u64Dst) = (int32_t)u32Tmp; \
8765 } while (0)
8766
8767#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8768 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8769#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8770 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8771#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8772 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8773#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8774 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8775
8776#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8777 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8778#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8779 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8780#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8781 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8782#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8783 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8784
8785#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8786#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8787#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8788#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8789#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8790#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8791#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8792 do { \
8793 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8794 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8795 } while (0)
8796
8797#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8798 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8799#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8800 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8801
8802
8803#define IEM_MC_PUSH_U16(a_u16Value) \
8804 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8805#define IEM_MC_PUSH_U32(a_u32Value) \
8806 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8807#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8808 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8809#define IEM_MC_PUSH_U64(a_u64Value) \
8810 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8811
8812#define IEM_MC_POP_U16(a_pu16Value) \
8813 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8814#define IEM_MC_POP_U32(a_pu32Value) \
8815 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8816#define IEM_MC_POP_U64(a_pu64Value) \
8817 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
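/*
 * Illustrative sketch (not part of the original source): roughly how the
 * microcode macros above compose into an instruction body.  The function name
 * is made up and the surrounding decoder plumbing is omitted.
 */
#if 0 /* illustration only, never compiled */
IEM_STATIC VBOXSTRICTRC iemExampleOp_push_rax(PIEMCPU pIemCpu)
{
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint64_t, u64Value);
    IEM_MC_FETCH_GREG_U64(u64Value, X86_GREG_xAX);
    IEM_MC_PUSH_U64(u64Value);      /* Returns on #SS/#PF via IEM_MC_RETURN_ON_FAILURE. */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif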
8818
8819/** Maps guest memory for direct or bounce buffered access.
8820 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8821 * @remarks May return.
8822 */
8823#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8824 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8825
8826/** Maps guest memory for direct or bounce buffered access.
8827 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8828 * @remarks May return.
8829 */
8830#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8831 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8832
8833/** Commits the memory and unmaps the guest memory.
8834 * @remarks May return.
8835 */
8836#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8837 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8838
8839/** Commits the memory and unmaps the guest memory, unless the FPU status word
8840 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8841 * would cause FLD not to store.
8842 *
8843 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8844 * store, while \#P will not.
8845 *
8846 * @remarks May in theory return - for now.
8847 */
8848#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8849 do { \
8850 if ( !(a_u16FSW & X86_FSW_ES) \
8851 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8852 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8853 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8854 } while (0)
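/*
 * Worked example (illustrative only): with FSW = X86_FSW_ES | X86_FSW_IE and
 * FCW.IM set (the invalid-operation exception is masked), the unmasked set is
 * empty and the store is committed; with the same FSW but FCW.IM clear, the
 * pending \#IA/\#IS is unmasked and the commit is skipped, matching the
 * behaviour described above.
 */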
8855
8856/** Calculate the effective address from R/M. */
8857#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8858 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8859
8860#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8861#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8862#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8863#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8864#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8865#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8866#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
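/*
 * Illustrative sketch (not part of the original source): mapping a memory
 * operand with IEM_MC_MEM_MAP, handing it to a worker together with EFLAGS,
 * and committing the result.  The worker (pfnExampleWorkerU32) is a made-up
 * name, bRm comes from the decoder, and the FNIEMOP body around this is
 * omitted.
 */
#if 0 /* illustration only, never compiled */
IEM_MC_BEGIN(3, 2);
IEM_MC_ARG(uint32_t *,          pu32Dst,  0);
IEM_MC_ARG(uint32_t,            u32Src,   1);
IEM_MC_ARG_LOCAL_EFLAGS(        pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR,           GCPtrEffDst);

IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_FETCH_GREG_U32(u32Src, X86_GREG_xCX);
IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_EFLAGS(EFlags);
IEM_MC_CALL_VOID_AIMPL_3(pfnExampleWorkerU32, pu32Dst, u32Src, pEFlags); /* hypothetical worker */
IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
IEM_MC_COMMIT_EFLAGS(EFlags);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif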
8867
8868/**
8869 * Defers the rest of the instruction emulation to a C implementation routine
8870 * and returns, only taking the standard parameters.
8871 *
8872 * @param a_pfnCImpl The pointer to the C routine.
8873 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8874 */
8875#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8876
8877/**
8878 * Defers the rest of instruction emulation to a C implementation routine and
8879 * returns, taking one argument in addition to the standard ones.
8880 *
8881 * @param a_pfnCImpl The pointer to the C routine.
8882 * @param a0 The argument.
8883 */
8884#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8885
8886/**
8887 * Defers the rest of the instruction emulation to a C implementation routine
8888 * and returns, taking two arguments in addition to the standard ones.
8889 *
8890 * @param a_pfnCImpl The pointer to the C routine.
8891 * @param a0 The first extra argument.
8892 * @param a1 The second extra argument.
8893 */
8894#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8895
8896/**
8897 * Defers the rest of the instruction emulation to a C implementation routine
8898 * and returns, taking three arguments in addition to the standard ones.
8899 *
8900 * @param a_pfnCImpl The pointer to the C routine.
8901 * @param a0 The first extra argument.
8902 * @param a1 The second extra argument.
8903 * @param a2 The third extra argument.
8904 */
8905#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8906
8907/**
8908 * Defers the rest of the instruction emulation to a C implementation routine
8909 * and returns, taking four arguments in addition to the standard ones.
8910 *
8911 * @param a_pfnCImpl The pointer to the C routine.
8912 * @param a0 The first extra argument.
8913 * @param a1 The second extra argument.
8914 * @param a2 The third extra argument.
8915 * @param a3 The fourth extra argument.
8916 */
8917#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8918
8919/**
8920 * Defers the rest of the instruction emulation to a C implementation routine
8921 * and returns, taking five arguments in addition to the standard ones.
8922 *
8923 * @param a_pfnCImpl The pointer to the C routine.
8924 * @param a0 The first extra argument.
8925 * @param a1 The second extra argument.
8926 * @param a2 The third extra argument.
8927 * @param a3 The fourth extra argument.
8928 * @param a4 The fifth extra argument.
8929 */
8930#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8931
8932/**
8933 * Defers the entire instruction emulation to a C implementation routine and
8934 * returns, only taking the standard parameters.
8935 *
8936 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8937 *
8938 * @param a_pfnCImpl The pointer to the C routine.
8939 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8940 */
8941#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8942
8943/**
8944 * Defers the entire instruction emulation to a C implementation routine and
8945 * returns, taking one argument in addition to the standard ones.
8946 *
8947 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8948 *
8949 * @param a_pfnCImpl The pointer to the C routine.
8950 * @param a0 The argument.
8951 */
8952#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8953
8954/**
8955 * Defers the entire instruction emulation to a C implementation routine and
8956 * returns, taking two arguments in addition to the standard ones.
8957 *
8958 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8959 *
8960 * @param a_pfnCImpl The pointer to the C routine.
8961 * @param a0 The first extra argument.
8962 * @param a1 The second extra argument.
8963 */
8964#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8965
8966/**
8967 * Defers the entire instruction emulation to a C implementation routine and
8968 * returns, taking three arguments in addition to the standard ones.
8969 *
8970 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8971 *
8972 * @param a_pfnCImpl The pointer to the C routine.
8973 * @param a0 The first extra argument.
8974 * @param a1 The second extra argument.
8975 * @param a2 The third extra argument.
8976 */
8977#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
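/*
 * Illustrative sketch (not part of the original source): an instruction that
 * is implemented entirely in C simply defers to its C worker.  Both the
 * opcode handler and the C worker named here are made up.
 */
#if 0 /* illustration only, never compiled */
IEM_STATIC VBOXSTRICTRC iemExampleOp_hlt(PIEMCPU pIemCpu)
{
    /* No IEM_MC_BEGIN / IEM_MC_END around a full deferral. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_exampleHalt);
}
#endif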
8978
8979/**
8980 * Calls a FPU assembly implementation taking one visible argument.
8981 *
8982 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8983 * @param a0 The first extra argument.
8984 */
8985#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
8986 do { \
8987 iemFpuPrepareUsage(pIemCpu); \
8988 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
8989 } while (0)
8990
8991/**
8992 * Calls a FPU assembly implementation taking two visible arguments.
8993 *
8994 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8995 * @param a0 The first extra argument.
8996 * @param a1 The second extra argument.
8997 */
8998#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
8999 do { \
9000 iemFpuPrepareUsage(pIemCpu); \
9001 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9002 } while (0)
9003
9004/**
9005 * Calls a FPU assembly implementation taking three visible arguments.
9006 *
9007 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9008 * @param a0 The first extra argument.
9009 * @param a1 The second extra argument.
9010 * @param a2 The third extra argument.
9011 */
9012#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9013 do { \
9014 iemFpuPrepareUsage(pIemCpu); \
9015 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9016 } while (0)
9017
9018#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9019 do { \
9020 (a_FpuData).FSW = (a_FSW); \
9021 (a_FpuData).r80Result = *(a_pr80Value); \
9022 } while (0)
9023
9024/** Pushes FPU result onto the stack. */
9025#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9026 iemFpuPushResult(pIemCpu, &a_FpuData)
9027/** Pushes FPU result onto the stack and sets the FPUDP. */
9028#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9029 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9030
9031/** Replaces ST0 with value one and pushes value two onto the FPU stack. */
9032#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9033 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9034
9035/** Stores FPU result in a stack register. */
9036#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9037 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9038/** Stores FPU result in a stack register and pops the stack. */
9039#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9040 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9041/** Stores FPU result in a stack register and sets the FPUDP. */
9042#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9043 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9044/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9045 * stack. */
9046#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9047 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9048
9049/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9050#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9051 iemFpuUpdateOpcodeAndIp(pIemCpu)
9052/** Free a stack register (for FFREE and FFREEP). */
9053#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9054 iemFpuStackFree(pIemCpu, a_iStReg)
9055/** Increment the FPU stack pointer. */
9056#define IEM_MC_FPU_STACK_INC_TOP() \
9057 iemFpuStackIncTop(pIemCpu)
9058/** Decrement the FPU stack pointer. */
9059#define IEM_MC_FPU_STACK_DEC_TOP() \
9060 iemFpuStackDecTop(pIemCpu)
9061
9062/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9063#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9064 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9065/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9066#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9067 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9068/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9069#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9070 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9071/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9072#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9073 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9074/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9075 * stack. */
9076#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9077 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9078/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9079#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9080 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9081
9082/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9083#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9084 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9085/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9086 * stack. */
9087#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9088 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9089/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9090 * FPUDS. */
9091#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9092 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9093/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9094 * FPUDS. Pops stack. */
9095#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9096 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9097/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9098 * stack twice. */
9099#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9100 iemFpuStackUnderflowThenPopPop(pIemCpu)
9101/** Raises a FPU stack underflow exception for an instruction pushing a result
9102 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9103#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9104 iemFpuStackPushUnderflow(pIemCpu)
9105/** Raises a FPU stack underflow exception for an instruction pushing a result
9106 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9107#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9108 iemFpuStackPushUnderflowTwo(pIemCpu)
9109
9110/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9111 * FPUIP, FPUCS and FOP. */
9112#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9113 iemFpuStackPushOverflow(pIemCpu)
9114/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9115 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9116#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9117 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9118/** Indicates that we (might) have modified the FPU state. */
9119#define IEM_MC_USED_FPU() \
9120 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
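/*
 * Illustrative sketch (not part of the original source): the rough shape of a
 * result-pushing FPU instruction body built from the macros above - raise
 * checks first, then the assembly worker, then result bookkeeping.  The
 * worker name is made up and the usual stack-fullness checks are omitted.
 */
#if 0 /* illustration only, never compiled */
IEM_MC_BEGIN(1, 1);
IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);

IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
IEM_MC_CALL_FPU_AIMPL_1(pfnExampleFpuConstWorker, pFpuRes); /* hypothetical worker filling FpuRes */
IEM_MC_PUSH_FPU_RESULT(FpuRes);
IEM_MC_USED_FPU();
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif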
9121
9122/**
9123 * Calls a MMX assembly implementation taking two visible arguments.
9124 *
9125 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9126 * @param a0 The first extra argument.
9127 * @param a1 The second extra argument.
9128 */
9129#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9130 do { \
9131 iemFpuPrepareUsage(pIemCpu); \
9132 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9133 } while (0)
9134
9135/**
9136 * Calls a MMX assembly implementation taking three visible arguments.
9137 *
9138 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9139 * @param a0 The first extra argument.
9140 * @param a1 The second extra argument.
9141 * @param a2 The third extra argument.
9142 */
9143#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9144 do { \
9145 iemFpuPrepareUsage(pIemCpu); \
9146 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9147 } while (0)
9148
9149
9150/**
9151 * Calls a SSE assembly implementation taking two visible arguments.
9152 *
9153 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9154 * @param a0 The first extra argument.
9155 * @param a1 The second extra argument.
9156 */
9157#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9158 do { \
9159 iemFpuPrepareUsageSse(pIemCpu); \
9160 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9161 } while (0)
9162
9163/**
9164 * Calls a SSE assembly implementation taking three visible arguments.
9165 *
9166 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9167 * @param a0 The first extra argument.
9168 * @param a1 The second extra argument.
9169 * @param a2 The third extra argument.
9170 */
9171#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9172 do { \
9173 iemFpuPrepareUsageSse(pIemCpu); \
9174 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9175 } while (0)
9176
9177
9178/** @note Not for IOPL or IF testing. */
9179#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9180/** @note Not for IOPL or IF testing. */
9181#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9182/** @note Not for IOPL or IF testing. */
9183#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9184/** @note Not for IOPL or IF testing. */
9185#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9186/** @note Not for IOPL or IF testing. */
9187#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9188 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9189 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9190/** @note Not for IOPL or IF testing. */
9191#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9192 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9193 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9194/** @note Not for IOPL or IF testing. */
9195#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9196 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9197 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9198 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9199/** @note Not for IOPL or IF testing. */
9200#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9201 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9202 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9203 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9204#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9205#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9206#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9207/** @note Not for IOPL or IF testing. */
9208#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9209 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9210 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9211/** @note Not for IOPL or IF testing. */
9212#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9213 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9214 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9215/** @note Not for IOPL or IF testing. */
9216#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9217 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9218 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9219/** @note Not for IOPL or IF testing. */
9220#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9221 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9222 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9223/** @note Not for IOPL or IF testing. */
9224#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9225 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9226 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9227/** @note Not for IOPL or IF testing. */
9228#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9229 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9230 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9231#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9232#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9233#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9234 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9235#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9236 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9237#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9238 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9239#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9240 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9241#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9242 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9243#define IEM_MC_IF_FCW_IM() \
9244 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9245
9246#define IEM_MC_ELSE() } else {
9247#define IEM_MC_ENDIF() } do {} while (0)
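
/*
 * Illustrative sketch only: the IF/ELSE/ENDIF macros above simply open and
 * close C blocks, so a conditional instruction body reads roughly like the
 * following (the flag constant and jump/advance macros are the usual ones,
 * but the snippet itself is made up):
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 */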
9248
9249/** @} */
9250
9251
9252/** @name Opcode Debug Helpers.
9253 * @{
9254 */
9255#ifdef DEBUG
9256# define IEMOP_MNEMONIC(a_szMnemonic) \
9257 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9258 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9259# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9260 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9261 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9262#else
9263# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9264# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9265#endif
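
/*
 * Illustrative sketch only: a decoder function normally notes its mnemonic
 * first thing via the macros above; the opcode name below is a placeholder:
 *
 *      FNIEMOP_DEF(iemOp_placeholder)
 *      {
 *          IEMOP_MNEMONIC("placeholder");
 *          ...
 *      }
 */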
9266
9267/** @} */
9268
9269
9270/** @name Opcode Helpers.
9271 * @{
9272 */
9273
9274/** The instruction raises an \#UD in real and V8086 mode. */
9275#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9276 do \
9277 { \
9278 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9279 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9280 } while (0)
9281
9282/** The instruction does not allow lock prefixing (in this encoding); raises
9283 * \#UD if lock prefixed.
9284 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9285#define IEMOP_HLP_NO_LOCK_PREFIX() \
9286 do \
9287 { \
9288 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9289 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9290 } while (0)
9291
9292/** The instruction is not available in 64-bit mode; raises \#UD if executed
9293 * in 64-bit mode. */
9294#define IEMOP_HLP_NO_64BIT() \
9295 do \
9296 { \
9297 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9298 return IEMOP_RAISE_INVALID_OPCODE(); \
9299 } while (0)
9300
9301/** The instruction is only available in 64-bit mode; raises \#UD if executed
9302 * outside 64-bit mode. */
9303#define IEMOP_HLP_ONLY_64BIT() \
9304 do \
9305 { \
9306 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9307 return IEMOP_RAISE_INVALID_OPCODE(); \
9308 } while (0)
9309
9310/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9311#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9312 do \
9313 { \
9314 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9315 iemRecalEffOpSize64Default(pIemCpu); \
9316 } while (0)
9317
9318/** The instruction has 64-bit operand size if 64-bit mode. */
9319#define IEMOP_HLP_64BIT_OP_SIZE() \
9320 do \
9321 { \
9322 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9323 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9324 } while (0)
9325
9326/** Only a REX prefix immediately preceding the first opcode byte takes
9327 * effect. This macro helps ensure this and also logs bad guest code. */
9328#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9329 do \
9330 { \
9331 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9332 { \
9333 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9334 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9335 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9336 pIemCpu->uRexB = 0; \
9337 pIemCpu->uRexIndex = 0; \
9338 pIemCpu->uRexReg = 0; \
9339 iemRecalEffOpSize(pIemCpu); \
9340 } \
9341 } while (0)
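
/*
 * Illustrative sketch only: a prefix decoder would invoke the above right
 * after identifying itself so that a REX byte seen earlier is dropped again;
 * the function and prefix names below are placeholders:
 *
 *      FNIEMOP_DEF(iemOp_some_prefix)
 *      {
 *          IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("some prefix");
 *          ...
 *      }
 */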
9342
9343/**
9344 * Done decoding.
9345 */
9346#define IEMOP_HLP_DONE_DECODING() \
9347 do \
9348 { \
9349 /*nothing for now, maybe later... */ \
9350 } while (0)
9351
9352/**
9353 * Done decoding, raise \#UD exception if lock prefix present.
9354 */
9355#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9356 do \
9357 { \
9358 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9359 { /* likely */ } \
9360 else \
9361 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9362 } while (0)
9363#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9364 do \
9365 { \
9366 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9367 { /* likely */ } \
9368 else \
9369 { \
9370 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9371 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9372 } \
9373 } while (0)
9374#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9375 do \
9376 { \
9377 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9378 { /* likely */ } \
9379 else \
9380 { \
9381 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9382 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9383 } \
9384 } while (0)
9385/**
9386 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9387 * are present.
9388 */
9389#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9390 do \
9391 { \
9392 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9393 { /* likely */ } \
9394 else \
9395 return IEMOP_RAISE_INVALID_OPCODE(); \
9396 } while (0)
9397
9398
9399/**
9400 * Calculates the effective address of a ModR/M memory operand.
9401 *
9402 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9403 *
9404 * @return Strict VBox status code.
9405 * @param pIemCpu The IEM per CPU data.
9406 * @param bRm The ModRM byte.
9407 * @param cbImm The size of any immediate following the
9408 * effective address opcode bytes. Important for
9409 * RIP relative addressing.
9410 * @param pGCPtrEff Where to return the effective address.
9411 */
9412IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9413{
9414 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9415 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9416#define SET_SS_DEF() \
9417 do \
9418 { \
9419 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9420 pIemCpu->iEffSeg = X86_SREG_SS; \
9421 } while (0)
9422
9423 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9424 {
9425/** @todo Check the effective address size crap! */
9426 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9427 {
9428 uint16_t u16EffAddr;
9429
9430 /* Handle the disp16 form with no registers first. */
9431 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9432 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9433 else
9434 {
9435 /* Get the displacement. */
9436 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9437 {
9438 case 0: u16EffAddr = 0; break;
9439 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9440 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9441 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9442 }
9443
9444 /* Add the base and index registers to the disp. */
9445 switch (bRm & X86_MODRM_RM_MASK)
9446 {
9447 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9448 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9449 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9450 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9451 case 4: u16EffAddr += pCtx->si; break;
9452 case 5: u16EffAddr += pCtx->di; break;
9453 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9454 case 7: u16EffAddr += pCtx->bx; break;
9455 }
9456 }
9457
9458 *pGCPtrEff = u16EffAddr;
9459 }
9460 else
9461 {
9462 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9463 uint32_t u32EffAddr;
9464
9465 /* Handle the disp32 form with no registers first. */
9466 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9467 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9468 else
9469 {
9470 /* Get the register (or SIB) value. */
9471 switch ((bRm & X86_MODRM_RM_MASK))
9472 {
9473 case 0: u32EffAddr = pCtx->eax; break;
9474 case 1: u32EffAddr = pCtx->ecx; break;
9475 case 2: u32EffAddr = pCtx->edx; break;
9476 case 3: u32EffAddr = pCtx->ebx; break;
9477 case 4: /* SIB */
9478 {
9479 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9480
9481 /* Get the index and scale it. */
9482 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9483 {
9484 case 0: u32EffAddr = pCtx->eax; break;
9485 case 1: u32EffAddr = pCtx->ecx; break;
9486 case 2: u32EffAddr = pCtx->edx; break;
9487 case 3: u32EffAddr = pCtx->ebx; break;
9488 case 4: u32EffAddr = 0; /*none */ break;
9489 case 5: u32EffAddr = pCtx->ebp; break;
9490 case 6: u32EffAddr = pCtx->esi; break;
9491 case 7: u32EffAddr = pCtx->edi; break;
9492 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9493 }
9494 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9495
9496 /* add base */
9497 switch (bSib & X86_SIB_BASE_MASK)
9498 {
9499 case 0: u32EffAddr += pCtx->eax; break;
9500 case 1: u32EffAddr += pCtx->ecx; break;
9501 case 2: u32EffAddr += pCtx->edx; break;
9502 case 3: u32EffAddr += pCtx->ebx; break;
9503 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9504 case 5:
9505 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9506 {
9507 u32EffAddr += pCtx->ebp;
9508 SET_SS_DEF();
9509 }
9510 else
9511 {
9512 uint32_t u32Disp;
9513 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9514 u32EffAddr += u32Disp;
9515 }
9516 break;
9517 case 6: u32EffAddr += pCtx->esi; break;
9518 case 7: u32EffAddr += pCtx->edi; break;
9519 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9520 }
9521 break;
9522 }
9523 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9524 case 6: u32EffAddr = pCtx->esi; break;
9525 case 7: u32EffAddr = pCtx->edi; break;
9526 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9527 }
9528
9529 /* Get and add the displacement. */
9530 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9531 {
9532 case 0:
9533 break;
9534 case 1:
9535 {
9536 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9537 u32EffAddr += i8Disp;
9538 break;
9539 }
9540 case 2:
9541 {
9542 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9543 u32EffAddr += u32Disp;
9544 break;
9545 }
9546 default:
9547 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9548 }
9549
9550 }
9551 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9552 *pGCPtrEff = u32EffAddr;
9553 else
9554 {
9555 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9556 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9557 }
9558 }
9559 }
9560 else
9561 {
9562 uint64_t u64EffAddr;
9563
9564 /* Handle the rip+disp32 form with no registers first. */
9565 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9566 {
9567 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9568 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9569 }
9570 else
9571 {
9572 /* Get the register (or SIB) value. */
9573 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9574 {
9575 case 0: u64EffAddr = pCtx->rax; break;
9576 case 1: u64EffAddr = pCtx->rcx; break;
9577 case 2: u64EffAddr = pCtx->rdx; break;
9578 case 3: u64EffAddr = pCtx->rbx; break;
9579 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9580 case 6: u64EffAddr = pCtx->rsi; break;
9581 case 7: u64EffAddr = pCtx->rdi; break;
9582 case 8: u64EffAddr = pCtx->r8; break;
9583 case 9: u64EffAddr = pCtx->r9; break;
9584 case 10: u64EffAddr = pCtx->r10; break;
9585 case 11: u64EffAddr = pCtx->r11; break;
9586 case 13: u64EffAddr = pCtx->r13; break;
9587 case 14: u64EffAddr = pCtx->r14; break;
9588 case 15: u64EffAddr = pCtx->r15; break;
9589 /* SIB */
9590 case 4:
9591 case 12:
9592 {
9593 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9594
9595 /* Get the index and scale it. */
9596 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9597 {
9598 case 0: u64EffAddr = pCtx->rax; break;
9599 case 1: u64EffAddr = pCtx->rcx; break;
9600 case 2: u64EffAddr = pCtx->rdx; break;
9601 case 3: u64EffAddr = pCtx->rbx; break;
9602 case 4: u64EffAddr = 0; /*none */ break;
9603 case 5: u64EffAddr = pCtx->rbp; break;
9604 case 6: u64EffAddr = pCtx->rsi; break;
9605 case 7: u64EffAddr = pCtx->rdi; break;
9606 case 8: u64EffAddr = pCtx->r8; break;
9607 case 9: u64EffAddr = pCtx->r9; break;
9608 case 10: u64EffAddr = pCtx->r10; break;
9609 case 11: u64EffAddr = pCtx->r11; break;
9610 case 12: u64EffAddr = pCtx->r12; break;
9611 case 13: u64EffAddr = pCtx->r13; break;
9612 case 14: u64EffAddr = pCtx->r14; break;
9613 case 15: u64EffAddr = pCtx->r15; break;
9614 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9615 }
9616 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9617
9618 /* add base */
9619 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9620 {
9621 case 0: u64EffAddr += pCtx->rax; break;
9622 case 1: u64EffAddr += pCtx->rcx; break;
9623 case 2: u64EffAddr += pCtx->rdx; break;
9624 case 3: u64EffAddr += pCtx->rbx; break;
9625 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9626 case 6: u64EffAddr += pCtx->rsi; break;
9627 case 7: u64EffAddr += pCtx->rdi; break;
9628 case 8: u64EffAddr += pCtx->r8; break;
9629 case 9: u64EffAddr += pCtx->r9; break;
9630 case 10: u64EffAddr += pCtx->r10; break;
9631 case 11: u64EffAddr += pCtx->r11; break;
9632 case 12: u64EffAddr += pCtx->r12; break;
9633 case 14: u64EffAddr += pCtx->r14; break;
9634 case 15: u64EffAddr += pCtx->r15; break;
9635 /* complicated encodings */
9636 case 5:
9637 case 13:
9638 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9639 {
9640 if (!pIemCpu->uRexB)
9641 {
9642 u64EffAddr += pCtx->rbp;
9643 SET_SS_DEF();
9644 }
9645 else
9646 u64EffAddr += pCtx->r13;
9647 }
9648 else
9649 {
9650 uint32_t u32Disp;
9651 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9652 u64EffAddr += (int32_t)u32Disp;
9653 }
9654 break;
9655 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9656 }
9657 break;
9658 }
9659 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9660 }
9661
9662 /* Get and add the displacement. */
9663 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9664 {
9665 case 0:
9666 break;
9667 case 1:
9668 {
9669 int8_t i8Disp;
9670 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9671 u64EffAddr += i8Disp;
9672 break;
9673 }
9674 case 2:
9675 {
9676 uint32_t u32Disp;
9677 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9678 u64EffAddr += (int32_t)u32Disp;
9679 break;
9680 }
9681 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9682 }
9683
9684 }
9685
9686 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9687 *pGCPtrEff = u64EffAddr;
9688 else
9689 {
9690 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9691 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9692 }
9693 }
9694
9695 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9696 return VINF_SUCCESS;
9697}
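
/*
 * Illustrative sketch only: instruction bodies do not call the helper above
 * directly but go through IEM_MC_CALC_RM_EFF_ADDR, roughly like this
 * (placeholder names, no trailing immediate bytes assumed):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      ...
 *      IEM_MC_END();
 */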
9698
9699/** @} */
9700
9701
9702
9703/*
9704 * Include the instructions
9705 */
9706#include "IEMAllInstructions.cpp.h"
9707
9708
9709
9710
9711#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9712
9713/**
9714 * Sets up execution verification mode.
9715 */
9716IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9717{
9718 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9719 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9720
9721 /*
9722 * Always note down the address of the current instruction.
9723 */
9724 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9725 pIemCpu->uOldRip = pOrgCtx->rip;
9726
9727 /*
9728 * Enable verification and/or logging.
9729 */
9730 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9731 if ( fNewNoRem
9732 && ( 0
9733#if 0 /* auto enable on first paged protected mode interrupt */
9734 || ( pOrgCtx->eflags.Bits.u1IF
9735 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9736 && TRPMHasTrap(pVCpu)
9737 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9738#endif
9739#if 0
9740 || ( pOrgCtx->cs.Sel == 0x10
9741 && ( pOrgCtx->rip == 0x90119e3e
9742 || pOrgCtx->rip == 0x901d9810))
9743#endif
9744#if 0 /* Auto enable DSL - FPU stuff. */
9745 || ( pOrgCtx->cs.Sel == 0x10
9746 && (// pOrgCtx->rip == 0xc02ec07f
9747 //|| pOrgCtx->rip == 0xc02ec082
9748 //|| pOrgCtx->rip == 0xc02ec0c9
9749 0
9750 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9751#endif
9752#if 0 /* Auto enable DSL - fstp st0 stuff. */
9753 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9754#endif
9755#if 0
9756 || pOrgCtx->rip == 0x9022bb3a
9757#endif
9758#if 0
9759 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9760#endif
9761#if 0
9762 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9763 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9764#endif
9765#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
9766 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9767 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9768 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9769#endif
9770#if 0 /* NT4SP1 - xadd early boot. */
9771 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9772#endif
9773#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9774 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9775#endif
9776#if 0 /* NT4SP1 - cmpxchg (AMD). */
9777 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9778#endif
9779#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9780 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9781#endif
9782#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9783 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9784
9785#endif
9786#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9787 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9788
9789#endif
9790#if 0 /* NT4SP1 - frstor [ecx] */
9791 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9792#endif
9793#if 0 /* xxxxxx - All long mode code. */
9794 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9795#endif
9796#if 0 /* rep movsq linux 3.7 64-bit boot. */
9797 || (pOrgCtx->rip == 0x0000000000100241)
9798#endif
9799#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9800 || (pOrgCtx->rip == 0x000000000215e240)
9801#endif
9802#if 0 /* DOS's size-overridden iret to v8086. */
9803 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9804#endif
9805 )
9806 )
9807 {
9808 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9809 RTLogFlags(NULL, "enabled");
9810 fNewNoRem = false;
9811 }
9812 if (fNewNoRem != pIemCpu->fNoRem)
9813 {
9814 pIemCpu->fNoRem = fNewNoRem;
9815 if (!fNewNoRem)
9816 {
9817 LogAlways(("Enabling verification mode!\n"));
9818 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9819 }
9820 else
9821 LogAlways(("Disabling verification mode!\n"));
9822 }
9823
9824 /*
9825 * Switch state.
9826 */
9827 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9828 {
9829 static CPUMCTX s_DebugCtx; /* Ugly! */
9830
9831 s_DebugCtx = *pOrgCtx;
9832 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9833 }
9834
9835 /*
9836 * See if there is an interrupt pending in TRPM and inject it if we can.
9837 */
9838 pIemCpu->uInjectCpl = UINT8_MAX;
9839 if ( pOrgCtx->eflags.Bits.u1IF
9840 && TRPMHasTrap(pVCpu)
9841 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9842 {
9843 uint8_t u8TrapNo;
9844 TRPMEVENT enmType;
9845 RTGCUINT uErrCode;
9846 RTGCPTR uCr2;
9847 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9848 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9849 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9850 TRPMResetTrap(pVCpu);
9851 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9852 }
9853
9854 /*
9855 * Reset the counters.
9856 */
9857 pIemCpu->cIOReads = 0;
9858 pIemCpu->cIOWrites = 0;
9859 pIemCpu->fIgnoreRaxRdx = false;
9860 pIemCpu->fOverlappingMovs = false;
9861 pIemCpu->fProblematicMemory = false;
9862 pIemCpu->fUndefinedEFlags = 0;
9863
9864 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9865 {
9866 /*
9867 * Free all verification records.
9868 */
9869 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9870 pIemCpu->pIemEvtRecHead = NULL;
9871 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9872 do
9873 {
9874 while (pEvtRec)
9875 {
9876 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9877 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9878 pIemCpu->pFreeEvtRec = pEvtRec;
9879 pEvtRec = pNext;
9880 }
9881 pEvtRec = pIemCpu->pOtherEvtRecHead;
9882 pIemCpu->pOtherEvtRecHead = NULL;
9883 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9884 } while (pEvtRec);
9885 }
9886}
9887
9888
9889/**
9890 * Allocate an event record.
9891 * @returns Pointer to a record.
9892 */
9893IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9894{
9895 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9896 return NULL;
9897
9898 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9899 if (pEvtRec)
9900 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9901 else
9902 {
9903 if (!pIemCpu->ppIemEvtRecNext)
9904 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9905
9906 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9907 if (!pEvtRec)
9908 return NULL;
9909 }
9910 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9911 pEvtRec->pNext = NULL;
9912 return pEvtRec;
9913}
9914
9915
9916/**
9917 * IOMMMIORead notification.
9918 */
9919VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9920{
9921 PVMCPU pVCpu = VMMGetCpu(pVM);
9922 if (!pVCpu)
9923 return;
9924 PIEMCPU pIemCpu = &pVCpu->iem.s;
9925 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9926 if (!pEvtRec)
9927 return;
9928 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9929 pEvtRec->u.RamRead.GCPhys = GCPhys;
9930 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9931 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9932 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9933}
9934
9935
9936/**
9937 * IOMMMIOWrite notification.
9938 */
9939VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9940{
9941 PVMCPU pVCpu = VMMGetCpu(pVM);
9942 if (!pVCpu)
9943 return;
9944 PIEMCPU pIemCpu = &pVCpu->iem.s;
9945 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9946 if (!pEvtRec)
9947 return;
9948 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9949 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9950 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9951 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9952 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9953 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9954 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9955 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9956 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9957}
9958
9959
9960/**
9961 * IOMIOPortRead notification.
9962 */
9963VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9964{
9965 PVMCPU pVCpu = VMMGetCpu(pVM);
9966 if (!pVCpu)
9967 return;
9968 PIEMCPU pIemCpu = &pVCpu->iem.s;
9969 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9970 if (!pEvtRec)
9971 return;
9972 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9973 pEvtRec->u.IOPortRead.Port = Port;
9974 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9975 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9976 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9977}
9978
9979/**
9980 * IOMIOPortWrite notification.
9981 */
9982VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9983{
9984 PVMCPU pVCpu = VMMGetCpu(pVM);
9985 if (!pVCpu)
9986 return;
9987 PIEMCPU pIemCpu = &pVCpu->iem.s;
9988 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9989 if (!pEvtRec)
9990 return;
9991 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9992 pEvtRec->u.IOPortWrite.Port = Port;
9993 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9994 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9995 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9996 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9997}
9998
9999
10000VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
10001{
10002 AssertFailed();
10003}
10004
10005
10006VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
10007{
10008 AssertFailed();
10009}
10010
10011
10012/**
10013 * Fakes and records an I/O port read.
10014 *
10015 * @returns VINF_SUCCESS.
10016 * @param pIemCpu The IEM per CPU data.
10017 * @param Port The I/O port.
10018 * @param pu32Value Where to store the fake value.
10019 * @param cbValue The size of the access.
10020 */
10021IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10022{
10023 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10024 if (pEvtRec)
10025 {
10026 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10027 pEvtRec->u.IOPortRead.Port = Port;
10028 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
10029 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10030 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10031 }
10032 pIemCpu->cIOReads++;
10033 *pu32Value = 0xcccccccc;
10034 return VINF_SUCCESS;
10035}
10036
10037
10038/**
10039 * Fakes and records an I/O port write.
10040 *
10041 * @returns VINF_SUCCESS.
10042 * @param pIemCpu The IEM per CPU data.
10043 * @param Port The I/O port.
10044 * @param u32Value The value being written.
10045 * @param cbValue The size of the access.
10046 */
10047IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10048{
10049 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10050 if (pEvtRec)
10051 {
10052 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10053 pEvtRec->u.IOPortWrite.Port = Port;
10054 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10055 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10056 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10057 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10058 }
10059 pIemCpu->cIOWrites++;
10060 return VINF_SUCCESS;
10061}
10062
10063
10064/**
10065 * Used to add extra details about a stub case.
10066 * @param pIemCpu The IEM per CPU state.
10067 */
10068IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10069{
10070 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10071 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10072 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10073 char szRegs[4096];
10074 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10075 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10076 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10077 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10078 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10079 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10080 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10081 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10082 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10083 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10084 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10085 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10086 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10087 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10088 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10089 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10090 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10091 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10092 " efer=%016VR{efer}\n"
10093 " pat=%016VR{pat}\n"
10094 " sf_mask=%016VR{sf_mask}\n"
10095 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10096 " lstar=%016VR{lstar}\n"
10097 " star=%016VR{star} cstar=%016VR{cstar}\n"
10098 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10099 );
10100
10101 char szInstr1[256];
10102 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10103 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10104 szInstr1, sizeof(szInstr1), NULL);
10105 char szInstr2[256];
10106 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10107 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10108 szInstr2, sizeof(szInstr2), NULL);
10109
10110 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10111}
10112
10113
10114/**
10115 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10116 * dump to the assertion info.
10117 *
10118 * @param pEvtRec The record to dump.
10119 */
10120IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10121{
10122 switch (pEvtRec->enmEvent)
10123 {
10124 case IEMVERIFYEVENT_IOPORT_READ:
10125 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10126 pEvtRec->u.IOPortRead.Port,
10127 pEvtRec->u.IOPortRead.cbValue);
10128 break;
10129 case IEMVERIFYEVENT_IOPORT_WRITE:
10130 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10131 pEvtRec->u.IOPortWrite.Port,
10132 pEvtRec->u.IOPortWrite.cbValue,
10133 pEvtRec->u.IOPortWrite.u32Value);
10134 break;
10135 case IEMVERIFYEVENT_RAM_READ:
10136 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10137 pEvtRec->u.RamRead.GCPhys,
10138 pEvtRec->u.RamRead.cb);
10139 break;
10140 case IEMVERIFYEVENT_RAM_WRITE:
10141 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10142 pEvtRec->u.RamWrite.GCPhys,
10143 pEvtRec->u.RamWrite.cb,
10144 (int)pEvtRec->u.RamWrite.cb,
10145 pEvtRec->u.RamWrite.ab);
10146 break;
10147 default:
10148 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10149 break;
10150 }
10151}
10152
10153
10154/**
10155 * Raises an assertion on the specified records, showing the given message with
10156 * dumps of both records attached.
10157 *
10158 * @param pIemCpu The IEM per CPU data.
10159 * @param pEvtRec1 The first record.
10160 * @param pEvtRec2 The second record.
10161 * @param pszMsg The message explaining why we're asserting.
10162 */
10163IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10164{
10165 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10166 iemVerifyAssertAddRecordDump(pEvtRec1);
10167 iemVerifyAssertAddRecordDump(pEvtRec2);
10168 iemVerifyAssertMsg2(pIemCpu);
10169 RTAssertPanic();
10170}
10171
10172
10173/**
10174 * Raises an assertion on the specified record, showing the given message with
10175 * a record dump attached.
10176 *
10177 * @param pIemCpu The IEM per CPU data.
10178 * @param pEvtRec1 The first record.
10179 * @param pszMsg The message explaining why we're asserting.
10180 */
10181IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10182{
10183 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10184 iemVerifyAssertAddRecordDump(pEvtRec);
10185 iemVerifyAssertMsg2(pIemCpu);
10186 RTAssertPanic();
10187}
10188
10189
10190/**
10191 * Verifies a write record.
10192 *
10193 * @param pIemCpu The IEM per CPU data.
10194 * @param pEvtRec The write record.
10195 * @param fRem Set if REM was doing the other execution. If clear,
10196 * it was HM.
10197 */
10198IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10199{
10200 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10201 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10202 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10203 if ( RT_FAILURE(rc)
10204 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10205 {
10206 /* fend off ins */
10207 if ( !pIemCpu->cIOReads
10208 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10209 || ( pEvtRec->u.RamWrite.cb != 1
10210 && pEvtRec->u.RamWrite.cb != 2
10211 && pEvtRec->u.RamWrite.cb != 4) )
10212 {
10213 /* fend off ROMs and MMIO */
10214 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10215 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10216 {
10217 /* fend off fxsave */
10218 if (pEvtRec->u.RamWrite.cb != 512)
10219 {
10220 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10221 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10222 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10223 RTAssertMsg2Add("%s: %.*Rhxs\n"
10224 "iem: %.*Rhxs\n",
10225 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10226 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10227 iemVerifyAssertAddRecordDump(pEvtRec);
10228 iemVerifyAssertMsg2(pIemCpu);
10229 RTAssertPanic();
10230 }
10231 }
10232 }
10233 }
10234
10235}
10236
10237/**
10238 * Performs the post-execution verification checks.
10239 */
10240IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10241{
10242 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10243 return;
10244
10245 /*
10246 * Switch back the state.
10247 */
10248 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10249 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10250 Assert(pOrgCtx != pDebugCtx);
10251 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10252
10253 /*
10254 * Execute the instruction in REM.
10255 */
10256 bool fRem = false;
10257 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10258 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10259 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10260#ifdef IEM_VERIFICATION_MODE_FULL_HM
10261 if ( HMIsEnabled(pVM)
10262 && pIemCpu->cIOReads == 0
10263 && pIemCpu->cIOWrites == 0
10264 && !pIemCpu->fProblematicMemory)
10265 {
10266 uint64_t uStartRip = pOrgCtx->rip;
10267 unsigned iLoops = 0;
10268 do
10269 {
10270 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10271 iLoops++;
10272 } while ( rc == VINF_SUCCESS
10273 || ( rc == VINF_EM_DBG_STEPPED
10274 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10275 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10276 || ( pOrgCtx->rip != pDebugCtx->rip
10277 && pIemCpu->uInjectCpl != UINT8_MAX
10278 && iLoops < 8) );
10279 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10280 rc = VINF_SUCCESS;
10281 }
10282#endif
10283 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10284 || rc == VINF_IOM_R3_IOPORT_READ
10285 || rc == VINF_IOM_R3_IOPORT_WRITE
10286 || rc == VINF_IOM_R3_MMIO_READ
10287 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10288 || rc == VINF_IOM_R3_MMIO_WRITE
10289 || rc == VINF_CPUM_R3_MSR_READ
10290 || rc == VINF_CPUM_R3_MSR_WRITE
10291 || rc == VINF_EM_RESCHEDULE
10292 )
10293 {
10294 EMRemLock(pVM);
10295 rc = REMR3EmulateInstruction(pVM, pVCpu);
10296 AssertRC(rc);
10297 EMRemUnlock(pVM);
10298 fRem = true;
10299 }
10300
10301 /*
10302 * Compare the register states.
10303 */
10304 unsigned cDiffs = 0;
10305 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10306 {
10307 //Log(("REM and IEM ends up with different registers!\n"));
10308 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10309
10310# define CHECK_FIELD(a_Field) \
10311 do \
10312 { \
10313 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10314 { \
10315 switch (sizeof(pOrgCtx->a_Field)) \
10316 { \
10317 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10318 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10319 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10320 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10321 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10322 } \
10323 cDiffs++; \
10324 } \
10325 } while (0)
10326# define CHECK_XSTATE_FIELD(a_Field) \
10327 do \
10328 { \
10329 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10330 { \
10331 switch (sizeof(pOrgCtx->a_Field)) \
10332 { \
10333 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10334 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10335 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10336 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10337 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10338 } \
10339 cDiffs++; \
10340 } \
10341 } while (0)
10342
10343# define CHECK_BIT_FIELD(a_Field) \
10344 do \
10345 { \
10346 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10347 { \
10348 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10349 cDiffs++; \
10350 } \
10351 } while (0)
10352
10353# define CHECK_SEL(a_Sel) \
10354 do \
10355 { \
10356 CHECK_FIELD(a_Sel.Sel); \
10357 CHECK_FIELD(a_Sel.Attr.u); \
10358 CHECK_FIELD(a_Sel.u64Base); \
10359 CHECK_FIELD(a_Sel.u32Limit); \
10360 CHECK_FIELD(a_Sel.fFlags); \
10361 } while (0)
10362
10363 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10364 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10365
10366#if 1 /* The recompiler doesn't update these the intel way. */
10367 if (fRem)
10368 {
10369 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10370 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10371 pOrgXState->x87.CS = pDebugXState->x87.CS;
10372 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10373 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10374 pOrgXState->x87.DS = pDebugXState->x87.DS;
10375 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10376 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10377 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10378 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10379 }
10380#endif
10381 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10382 {
10383 RTAssertMsg2Weak(" the FPU state differs\n");
10384 cDiffs++;
10385 CHECK_XSTATE_FIELD(x87.FCW);
10386 CHECK_XSTATE_FIELD(x87.FSW);
10387 CHECK_XSTATE_FIELD(x87.FTW);
10388 CHECK_XSTATE_FIELD(x87.FOP);
10389 CHECK_XSTATE_FIELD(x87.FPUIP);
10390 CHECK_XSTATE_FIELD(x87.CS);
10391 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10392 CHECK_XSTATE_FIELD(x87.FPUDP);
10393 CHECK_XSTATE_FIELD(x87.DS);
10394 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10395 CHECK_XSTATE_FIELD(x87.MXCSR);
10396 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10397 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10398 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10399 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10400 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10401 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10402 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10403 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10404 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10405 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10406 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10407 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10408 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10409 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10410 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10411 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10412 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10413 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10414 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10415 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10416 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10417 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10418 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10419 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10420 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10421 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10422 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10423 }
10424 CHECK_FIELD(rip);
10425 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10426 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10427 {
10428 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10429 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10430 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10431 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10432 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10433 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10434 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10435 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10436 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10437 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10438 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10439 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10440 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10441 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10442 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10443 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10444 if (0 && !fRem) /** @todo debug the occasional cleared RF flag when running against VT-x. */
10445 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10446 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10447 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10448 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10449 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10450 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10451 }
10452
10453 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10454 CHECK_FIELD(rax);
10455 CHECK_FIELD(rcx);
10456 if (!pIemCpu->fIgnoreRaxRdx)
10457 CHECK_FIELD(rdx);
10458 CHECK_FIELD(rbx);
10459 CHECK_FIELD(rsp);
10460 CHECK_FIELD(rbp);
10461 CHECK_FIELD(rsi);
10462 CHECK_FIELD(rdi);
10463 CHECK_FIELD(r8);
10464 CHECK_FIELD(r9);
10465 CHECK_FIELD(r10);
10466 CHECK_FIELD(r11);
10467 CHECK_FIELD(r12);
10468 CHECK_FIELD(r13);
10469 CHECK_SEL(cs);
10470 CHECK_SEL(ss);
10471 CHECK_SEL(ds);
10472 CHECK_SEL(es);
10473 CHECK_SEL(fs);
10474 CHECK_SEL(gs);
10475 CHECK_FIELD(cr0);
10476
10477 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10478 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10479 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
10480 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10481 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10482 {
10483 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10484 { /* ignore */ }
10485 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10486 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10487 && fRem)
10488 { /* ignore */ }
10489 else
10490 CHECK_FIELD(cr2);
10491 }
10492 CHECK_FIELD(cr3);
10493 CHECK_FIELD(cr4);
10494 CHECK_FIELD(dr[0]);
10495 CHECK_FIELD(dr[1]);
10496 CHECK_FIELD(dr[2]);
10497 CHECK_FIELD(dr[3]);
10498 CHECK_FIELD(dr[6]);
10499 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10500 CHECK_FIELD(dr[7]);
10501 CHECK_FIELD(gdtr.cbGdt);
10502 CHECK_FIELD(gdtr.pGdt);
10503 CHECK_FIELD(idtr.cbIdt);
10504 CHECK_FIELD(idtr.pIdt);
10505 CHECK_SEL(ldtr);
10506 CHECK_SEL(tr);
10507 CHECK_FIELD(SysEnter.cs);
10508 CHECK_FIELD(SysEnter.eip);
10509 CHECK_FIELD(SysEnter.esp);
10510 CHECK_FIELD(msrEFER);
10511 CHECK_FIELD(msrSTAR);
10512 CHECK_FIELD(msrPAT);
10513 CHECK_FIELD(msrLSTAR);
10514 CHECK_FIELD(msrCSTAR);
10515 CHECK_FIELD(msrSFMASK);
10516 CHECK_FIELD(msrKERNELGSBASE);
10517
10518 if (cDiffs != 0)
10519 {
10520 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10521 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10522 iemVerifyAssertMsg2(pIemCpu);
10523 RTAssertPanic();
10524 }
10525# undef CHECK_FIELD
10526# undef CHECK_BIT_FIELD
10527 }
10528
10529 /*
10530 * If the register state compared fine, check the verification event
10531 * records.
10532 */
10533 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10534 {
10535 /*
10536 * Compare verification event records.
10537 * - I/O port accesses should be a 1:1 match.
10538 */
10539 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10540 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10541 while (pIemRec && pOtherRec)
10542 {
10543 /* Since we might miss RAM writes and reads, ignore reads and instead
10544 verify that any extra IEM write records match the actual guest memory. */
10545 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10546 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10547 && pIemRec->pNext)
10548 {
10549 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10550 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10551 pIemRec = pIemRec->pNext;
10552 }
10553
10554 /* Do the compare. */
10555 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10556 {
10557 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10558 break;
10559 }
10560 bool fEquals;
10561 switch (pIemRec->enmEvent)
10562 {
10563 case IEMVERIFYEVENT_IOPORT_READ:
10564 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10565 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10566 break;
10567 case IEMVERIFYEVENT_IOPORT_WRITE:
10568 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10569 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10570 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10571 break;
10572 case IEMVERIFYEVENT_RAM_READ:
10573 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10574 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10575 break;
10576 case IEMVERIFYEVENT_RAM_WRITE:
10577 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10578 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10579 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10580 break;
10581 default:
10582 fEquals = false;
10583 break;
10584 }
10585 if (!fEquals)
10586 {
10587 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10588 break;
10589 }
10590
10591 /* advance */
10592 pIemRec = pIemRec->pNext;
10593 pOtherRec = pOtherRec->pNext;
10594 }
10595
10596 /* Ignore extra writes and reads. */
10597 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10598 {
10599 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10600 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10601 pIemRec = pIemRec->pNext;
10602 }
10603 if (pIemRec != NULL)
10604 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10605 else if (pOtherRec != NULL)
10606 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10607 }
10608 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10609}
10610
10611#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10612
10613/* stubs */
10614IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10615{
10616 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10617 return VERR_INTERNAL_ERROR;
10618}
10619
10620IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10621{
10622 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10623 return VERR_INTERNAL_ERROR;
10624}
10625
10626#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10627
10628
10629#ifdef LOG_ENABLED
10630/**
10631 * Logs the current instruction.
10632 * @param pVCpu The cross context virtual CPU structure of the caller.
10633 * @param pCtx The current CPU context.
10634 * @param fSameCtx Set if we have the same context information as the VMM,
10635 * clear if we may have already executed an instruction in
10636 * our debug context. When clear, we assume IEMCPU holds
10637 * valid CPU mode info.
10638 */
10639IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10640{
10641# ifdef IN_RING3
10642 if (LogIs2Enabled())
10643 {
10644 char szInstr[256];
10645 uint32_t cbInstr = 0;
10646 if (fSameCtx)
10647 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10648 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10649 szInstr, sizeof(szInstr), &cbInstr);
10650 else
10651 {
10652 uint32_t fFlags = 0;
10653 switch (pVCpu->iem.s.enmCpuMode)
10654 {
10655 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10656 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10657 case IEMMODE_16BIT:
10658 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10659 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10660 else
10661 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10662 break;
10663 }
10664 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10665 szInstr, sizeof(szInstr), &cbInstr);
10666 }
10667
10668 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10669 Log2(("****\n"
10670 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10671 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10672 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10673 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10674 " %s\n"
10675 ,
10676 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10677 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10678 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10679 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10680 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10681 szInstr));
10682
10683 if (LogIs3Enabled())
10684 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10685 }
10686 else
10687# endif
10688 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10689 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10690}
10691#endif
10692
10693
10694/**
10695 * Makes status code adjustments (pass up from I/O and access handlers)
10696 * as well as maintaining statistics.
10697 *
10698 * @returns Strict VBox status code to pass up.
10699 * @param pIemCpu The IEM per CPU data.
10700 * @param rcStrict The status from executing an instruction.
10701 */
10702DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10703{
10704 if (rcStrict != VINF_SUCCESS)
10705 {
10706 if (RT_SUCCESS(rcStrict))
10707 {
10708 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10709 || rcStrict == VINF_IOM_R3_IOPORT_READ
10710 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10711 || rcStrict == VINF_IOM_R3_MMIO_READ
10712 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10713 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10714 || rcStrict == VINF_CPUM_R3_MSR_READ
10715 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10716 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10717 /* raw-mode / virt handlers only: */
10718 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10719 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10720 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10721 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10722 || rcStrict == VINF_SELM_SYNC_GDT
10723 || rcStrict == VINF_CSAM_PENDING_ACTION
10724 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10725 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10726/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10727 int32_t const rcPassUp = pIemCpu->rcPassUp;
10728 if (rcPassUp == VINF_SUCCESS)
10729 pIemCpu->cRetInfStatuses++;
10730 else if ( rcPassUp < VINF_EM_FIRST
10731 || rcPassUp > VINF_EM_LAST
10732 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10733 {
10734 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10735 pIemCpu->cRetPassUpStatus++;
10736 rcStrict = rcPassUp;
10737 }
10738 else
10739 {
10740 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10741 pIemCpu->cRetInfStatuses++;
10742 }
10743 }
10744 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10745 pIemCpu->cRetAspectNotImplemented++;
10746 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10747 pIemCpu->cRetInstrNotImplemented++;
10748#ifdef IEM_VERIFICATION_MODE_FULL
10749 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10750 rcStrict = VINF_SUCCESS;
10751#endif
10752 else
10753 pIemCpu->cRetErrStatuses++;
10754 }
10755 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10756 {
10757 pIemCpu->cRetPassUpStatus++;
10758 rcStrict = pIemCpu->rcPassUp;
10759 }
10760
10761 return rcStrict;
10762}
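
/*
 * Illustrative sketch only: the intended effect of the fiddling above is that
 * a previously queued, more urgent pass-up status wins over an ordinary
 * informational return.  The status values below are assumptions for the
 * example, not checked against the headers:
 *
 *      pIemCpu->rcPassUp = VINF_EM_RAW_TO_R3;            // queued earlier
 *      VBOXSTRICTRC rcStrict = VINF_IOM_R3_IOPORT_READ;  // from the instruction
 *      rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
 *      // rcStrict should now be VINF_EM_RAW_TO_R3 and cRetPassUpStatus bumped.
 */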
10763
10764
10765/**
10766 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10767 * IEMExecOneWithPrefetchedByPC.
10768 *
10769 * @return Strict VBox status code.
10770 * @param pVCpu The current virtual CPU.
10771 * @param pIemCpu The IEM per CPU data.
10772 * @param fExecuteInhibit If set, execute the instruction following CLI,
10773 * POP SS and MOV SS,GR.
10774 */
10775DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10776{
10777 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10778 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10779 if (rcStrict == VINF_SUCCESS)
10780 pIemCpu->cInstructions++;
10781 if (pIemCpu->cActiveMappings > 0)
10782 iemMemRollback(pIemCpu);
10783//#ifdef DEBUG
10784// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10785//#endif
10786
10787 /* Execute the next instruction as well if a cli, pop ss or
10788 mov ss, Gr has just completed successfully. */
10789 if ( fExecuteInhibit
10790 && rcStrict == VINF_SUCCESS
10791 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10792 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10793 {
10794 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10795 if (rcStrict == VINF_SUCCESS)
10796 {
10797# ifdef LOG_ENABLED
10798 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10799# endif
10800 IEM_OPCODE_GET_NEXT_U8(&b);
10801 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10802 if (rcStrict == VINF_SUCCESS)
10803 pIemCpu->cInstructions++;
10804 if (pIemCpu->cActiveMappings > 0)
10805 iemMemRollback(pIemCpu);
10806 }
10807 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10808 }
10809
10810 /*
10811 * Return value fiddling, statistics and sanity assertions.
10812 */
10813 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10814
10815 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10816 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10817#if defined(IEM_VERIFICATION_MODE_FULL)
10818 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10819 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10820 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10821 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10822#endif
10823 return rcStrict;
10824}
10825
10826
10827#ifdef IN_RC
10828/**
10829 * Re-enters raw-mode or ensures we return to ring-3.
10830 *
10831 * @returns rcStrict, maybe modified.
10832 * @param pIemCpu The IEM CPU structure.
10833 * @param pVCpu The cross context virtual CPU structure of the caller.
10834 * @param pCtx The current CPU context.
10835 * @param rcStrict    The status code returned by the interpreter.
10836 */
10837DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10838{
10839 if (!pIemCpu->fInPatchCode)
10840 CPUMRawEnter(pVCpu);
10841 return rcStrict;
10842}
10843#endif
10844
10845
10846/**
10847 * Execute one instruction.
10848 *
10849 * @return Strict VBox status code.
10850 * @param pVCpu The current virtual CPU.
10851 */
10852VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10853{
10854 PIEMCPU pIemCpu = &pVCpu->iem.s;
10855
10856#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10857 iemExecVerificationModeSetup(pIemCpu);
10858#endif
10859#ifdef LOG_ENABLED
10860 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10861 iemLogCurInstr(pVCpu, pCtx, true);
10862#endif
10863
10864 /*
10865 * Do the decoding and emulation.
10866 */
10867 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10868 if (rcStrict == VINF_SUCCESS)
10869 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10870
10871#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10872 /*
10873 * Assert some sanity.
10874 */
10875 iemExecVerificationModeCheck(pIemCpu);
10876#endif
10877#ifdef IN_RC
10878 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10879#endif
10880 if (rcStrict != VINF_SUCCESS)
10881 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10882 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10883 return rcStrict;
10884}
10885
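/*
 * Added usage sketch (illustrative only): a caller in EM or HM that cannot
 * handle an exit itself can fall back to interpreting a single guest
 * instruction.  SomeOtherFallback is a hypothetical stand-in for whatever the
 * caller does with instructions IEM does not implement yet.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
 *          || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
 *          rcStrict = SomeOtherFallback(pVCpu); // hypothetical, e.g. hand over to REM
 *      return rcStrict;    // informational statuses go back to the execution loop
 * @endcode
 */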
10886
10887VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10888{
10889 PIEMCPU pIemCpu = &pVCpu->iem.s;
10890 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10891 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10892
10893 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10894 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10895 if (rcStrict == VINF_SUCCESS)
10896 {
10897 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10898 if (pcbWritten)
10899 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10900 }
10901
10902#ifdef IN_RC
10903 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10904#endif
10905 return rcStrict;
10906}
10907
10908
10909VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10910 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10911{
10912 PIEMCPU pIemCpu = &pVCpu->iem.s;
10913 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10914 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10915
10916 VBOXSTRICTRC rcStrict;
10917 if ( cbOpcodeBytes
10918 && pCtx->rip == OpcodeBytesPC)
10919 {
10920 iemInitDecoder(pIemCpu, false);
10921 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10922 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10923 rcStrict = VINF_SUCCESS;
10924 }
10925 else
10926 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10927 if (rcStrict == VINF_SUCCESS)
10928 {
10929 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10930 }
10931
10932#ifdef IN_RC
10933 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10934#endif
10935 return rcStrict;
10936}
10937
10938
10939VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10940{
10941 PIEMCPU pIemCpu = &pVCpu->iem.s;
10942 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10943 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10944
10945 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10946 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10947 if (rcStrict == VINF_SUCCESS)
10948 {
10949 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10950 if (pcbWritten)
10951 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10952 }
10953
10954#ifdef IN_RC
10955 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10956#endif
10957 return rcStrict;
10958}
10959
10960
10961VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10962 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10963{
10964 PIEMCPU pIemCpu = &pVCpu->iem.s;
10965 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10966 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10967
10968 VBOXSTRICTRC rcStrict;
10969 if ( cbOpcodeBytes
10970 && pCtx->rip == OpcodeBytesPC)
10971 {
10972 iemInitDecoder(pIemCpu, true);
10973 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10974 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10975 rcStrict = VINF_SUCCESS;
10976 }
10977 else
10978 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10979 if (rcStrict == VINF_SUCCESS)
10980 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10981
10982#ifdef IN_RC
10983 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10984#endif
10985 return rcStrict;
10986}
10987
10988
10989VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
10990{
10991 PIEMCPU pIemCpu = &pVCpu->iem.s;
10992
10993 /*
10994 * See if there is an interrupt pending in TRPM and inject it if we can.
10995 */
10996#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
10997 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10998# ifdef IEM_VERIFICATION_MODE_FULL
10999 pIemCpu->uInjectCpl = UINT8_MAX;
11000# endif
11001 if ( pCtx->eflags.Bits.u1IF
11002 && TRPMHasTrap(pVCpu)
11003 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11004 {
11005 uint8_t u8TrapNo;
11006 TRPMEVENT enmType;
11007 RTGCUINT uErrCode;
11008 RTGCPTR uCr2;
11009 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11010 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11011 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11012 TRPMResetTrap(pVCpu);
11013 }
11014#else
11015 iemExecVerificationModeSetup(pIemCpu);
11016 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11017#endif
11018
11019 /*
11020 * Log the state.
11021 */
11022#ifdef LOG_ENABLED
11023 iemLogCurInstr(pVCpu, pCtx, true);
11024#endif
11025
11026 /*
11027 * Do the decoding and emulation.
11028 */
11029 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11030 if (rcStrict == VINF_SUCCESS)
11031 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11032
11033#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11034 /*
11035 * Assert some sanity.
11036 */
11037 iemExecVerificationModeCheck(pIemCpu);
11038#endif
11039
11040 /*
11041 * Maybe re-enter raw-mode and log.
11042 */
11043#ifdef IN_RC
11044 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11045#endif
11046 if (rcStrict != VINF_SUCCESS)
11047 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11048 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11049 return rcStrict;
11050}
11051
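/*
 * Added caller sketch (illustrative only): an execution loop could keep calling
 * IEMExecLots until something other than VINF_SUCCESS comes back.  A real loop
 * would also poll the VM/VMCPU force flags between iterations; the instruction
 * budget below is an arbitrary example value.
 *
 * @code
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict;
 *      do
 *          rcStrict = IEMExecLots(pVCpu);
 *      while (rcStrict == VINF_SUCCESS && ++cInstructions < 1024);
 *      return rcStrict;
 * @endcode
 */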
11052
11053
11054/**
11055 * Injects a trap, fault, abort, software interrupt or external interrupt.
11056 *
11057 * The parameter list matches TRPMQueryTrapAll pretty closely.
11058 *
11059 * @returns Strict VBox status code.
11060 * @param pVCpu The current virtual CPU.
11061 * @param u8TrapNo The trap number.
11062 * @param enmType What type is it (trap/fault/abort), software
11063 * interrupt or hardware interrupt.
11064 * @param uErrCode The error code if applicable.
11065 * @param uCr2 The CR2 value if applicable.
11066 * @param cbInstr The instruction length (only relevant for
11067 * software interrupts).
11068 */
11069VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11070 uint8_t cbInstr)
11071{
11072 iemInitDecoder(&pVCpu->iem.s, false);
11073#ifdef DBGFTRACE_ENABLED
11074 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11075 u8TrapNo, enmType, uErrCode, uCr2);
11076#endif
11077
11078 uint32_t fFlags;
11079 switch (enmType)
11080 {
11081 case TRPM_HARDWARE_INT:
11082 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11083 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11084 uErrCode = uCr2 = 0;
11085 break;
11086
11087 case TRPM_SOFTWARE_INT:
11088 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11089 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11090 uErrCode = uCr2 = 0;
11091 break;
11092
11093 case TRPM_TRAP:
11094 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11095 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11096 if (u8TrapNo == X86_XCPT_PF)
11097 fFlags |= IEM_XCPT_FLAGS_CR2;
11098 switch (u8TrapNo)
11099 {
11100 case X86_XCPT_DF:
11101 case X86_XCPT_TS:
11102 case X86_XCPT_NP:
11103 case X86_XCPT_SS:
11104 case X86_XCPT_PF:
11105 case X86_XCPT_AC:
11106 fFlags |= IEM_XCPT_FLAGS_ERR;
11107 break;
11108
11109 case X86_XCPT_NMI:
11110 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11111 break;
11112 }
11113 break;
11114
11115 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11116 }
11117
11118 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11119}
11120
11121
11122/**
11123 * Injects the active TRPM event.
11124 *
11125 * @returns Strict VBox status code.
11126 * @param pVCpu Pointer to the VMCPU.
11127 */
11128VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11129{
11130#ifndef IEM_IMPLEMENTS_TASKSWITCH
11131 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11132#else
11133 uint8_t u8TrapNo;
11134 TRPMEVENT enmType;
11135 RTGCUINT uErrCode;
11136 RTGCUINTPTR uCr2;
11137 uint8_t cbInstr;
11138 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11139 if (RT_FAILURE(rc))
11140 return rc;
11141
11142 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11143
11144 /** @todo Are there any other codes that imply the event was successfully
11145 * delivered to the guest? See @bugref{6607}. */
11146 if ( rcStrict == VINF_SUCCESS
11147 || rcStrict == VINF_IEM_RAISED_XCPT)
11148 {
11149 TRPMResetTrap(pVCpu);
11150 }
11151 return rcStrict;
11152#endif
11153}
11154
11155
11156VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11157{
11158 return VERR_NOT_IMPLEMENTED;
11159}
11160
11161
11162VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11163{
11164 return VERR_NOT_IMPLEMENTED;
11165}
11166
11167
11168#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11169/**
11170 * Executes an IRET instruction with default operand size.
11171 *
11172 * This is for PATM.
11173 *
11174 * @returns VBox status code.
11175 * @param pVCpu The current virtual CPU.
11176 * @param pCtxCore The register frame.
11177 */
11178VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11179{
11180 PIEMCPU pIemCpu = &pVCpu->iem.s;
11181 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11182
11183 iemCtxCoreToCtx(pCtx, pCtxCore);
11184    iemInitDecoder(pIemCpu, false /*fBypassHandlers*/);
11185 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11186 if (rcStrict == VINF_SUCCESS)
11187 iemCtxToCtxCore(pCtxCore, pCtx);
11188 else
11189 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11190                 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11191 return rcStrict;
11192}
11193#endif
11194
11195
11196/**
11197 * Macro used by the IEMExec* method to check the given instruction length.
11198 *
11199 * Will return on failure!
11200 *
11201 * @param a_cbInstr The given instruction length.
11202 * @param a_cbMin The minimum length.
11203 */
11204#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11205 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11206 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
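/*
 * Added note (explanatory, not in the original): the single unsigned comparison
 * above folds the lower and upper bound checks into one.  Subtracting a_cbMin
 * maps the valid range [a_cbMin..15] onto [0..15 - a_cbMin]; a value below
 * a_cbMin wraps around to a huge unsigned number and fails, and anything above
 * 15 (the architectural maximum x86 instruction length) fails as well.
 * For example, with a_cbMin = 2: a_cbInstr = 1 gives 0xFFFFFFFF > 13 (rejected),
 * a_cbInstr = 9 gives 7 <= 13 (accepted), a_cbInstr = 16 gives 14 > 13 (rejected).
 */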
11207
11208
11209/**
11210 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11211 *
11212 * This API ASSUMES that the caller has already verified that the guest code is
11213 * allowed to access the I/O port. (The I/O port is in the DX register in the
11214 * guest state.)
11215 *
11216 * @returns Strict VBox status code.
11217 * @param pVCpu The cross context per virtual CPU structure.
11218 * @param cbValue The size of the I/O port access (1, 2, or 4).
11219 * @param enmAddrMode The addressing mode.
11220 * @param fRepPrefix Indicates whether a repeat prefix is used
11221 * (doesn't matter which for this instruction).
11222 * @param cbInstr The instruction length in bytes.
11223 * @param iEffSeg The effective segment register number (the segment part of the source operand).
11224 */
11225VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11226 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11227{
11228 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11229 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11230
11231 /*
11232 * State init.
11233 */
11234 PIEMCPU pIemCpu = &pVCpu->iem.s;
11235 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11236
11237 /*
11238 * Switch orgy for getting to the right handler.
11239 */
11240 VBOXSTRICTRC rcStrict;
11241 if (fRepPrefix)
11242 {
11243 switch (enmAddrMode)
11244 {
11245 case IEMMODE_16BIT:
11246 switch (cbValue)
11247 {
11248 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11249 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11250 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11251 default:
11252 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11253 }
11254 break;
11255
11256 case IEMMODE_32BIT:
11257 switch (cbValue)
11258 {
11259 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11260 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11261 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11262 default:
11263 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11264 }
11265 break;
11266
11267 case IEMMODE_64BIT:
11268 switch (cbValue)
11269 {
11270 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11271 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11272 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11273 default:
11274 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11275 }
11276 break;
11277
11278 default:
11279 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11280 }
11281 }
11282 else
11283 {
11284 switch (enmAddrMode)
11285 {
11286 case IEMMODE_16BIT:
11287 switch (cbValue)
11288 {
11289 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11290 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11291 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11292 default:
11293 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11294 }
11295 break;
11296
11297 case IEMMODE_32BIT:
11298 switch (cbValue)
11299 {
11300 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11301 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11302 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11303 default:
11304 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11305 }
11306 break;
11307
11308 case IEMMODE_64BIT:
11309 switch (cbValue)
11310 {
11311 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11312 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11313 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11314 default:
11315 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11316 }
11317 break;
11318
11319 default:
11320 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11321 }
11322 }
11323
11324 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11325}
11326
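/*
 * Added usage sketch (illustrative only): an HM exit handler for a string OUT
 * instruction could hand the already-decoded parameters straight to the
 * interface above.  The local variable names are hypothetical; the values would
 * come from the VT-x/AMD-V exit information.
 *
 * @code
 *      // cbValue: 1, 2 or 4; enmAddrMode: IEMMODE_16BIT/32BIT/64BIT;
 *      // fRep: any REP/REPNE prefix present; cbInstr: length of the OUTS;
 *      // iEffSeg: effective segment register (X86_SREG_DS unless overridden).
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode,
 *                                                   fRep, cbInstr, iEffSeg);
 *      // VINF_IOM_R3_IOPORT_WRITE and friends mean the access has to be
 *      // completed in ring-3.
 * @endcode
 */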
11327
11328/**
11329 * Interface for HM and EM for executing string I/O IN (read) instructions.
11330 *
11331 * This API ASSUMES that the caller has already verified that the guest code is
11332 * allowed to access the I/O port. (The I/O port is in the DX register in the
11333 * guest state.)
11334 *
11335 * @returns Strict VBox status code.
11336 * @param pVCpu The cross context per virtual CPU structure.
11337 * @param cbValue The size of the I/O port access (1, 2, or 4).
11338 * @param enmAddrMode The addressing mode.
11339 * @param fRepPrefix Indicates whether a repeat prefix is used
11340 * (doesn't matter which for this instruction).
11341 * @param cbInstr The instruction length in bytes.
11342 */
11343VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11344 bool fRepPrefix, uint8_t cbInstr)
11345{
11346 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11347
11348 /*
11349 * State init.
11350 */
11351 PIEMCPU pIemCpu = &pVCpu->iem.s;
11352 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11353
11354 /*
11355 * Switch orgy for getting to the right handler.
11356 */
11357 VBOXSTRICTRC rcStrict;
11358 if (fRepPrefix)
11359 {
11360 switch (enmAddrMode)
11361 {
11362 case IEMMODE_16BIT:
11363 switch (cbValue)
11364 {
11365 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11366 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11367 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11368 default:
11369 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11370 }
11371 break;
11372
11373 case IEMMODE_32BIT:
11374 switch (cbValue)
11375 {
11376 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11377 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11378 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11379 default:
11380 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11381 }
11382 break;
11383
11384 case IEMMODE_64BIT:
11385 switch (cbValue)
11386 {
11387 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11388 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11389 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11390 default:
11391 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11392 }
11393 break;
11394
11395 default:
11396 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11397 }
11398 }
11399 else
11400 {
11401 switch (enmAddrMode)
11402 {
11403 case IEMMODE_16BIT:
11404 switch (cbValue)
11405 {
11406 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11407 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11408 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11409 default:
11410 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11411 }
11412 break;
11413
11414 case IEMMODE_32BIT:
11415 switch (cbValue)
11416 {
11417 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11418 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11419 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11420 default:
11421 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11422 }
11423 break;
11424
11425 case IEMMODE_64BIT:
11426 switch (cbValue)
11427 {
11428 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11429 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11430 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11431 default:
11432 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11433 }
11434 break;
11435
11436 default:
11437 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11438 }
11439 }
11440
11441 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11442}
11443
11444
11445
11446/**
11447 * Interface for HM and EM to write to a CRx register.
11448 *
11449 * @returns Strict VBox status code.
11450 * @param pVCpu The cross context per virtual CPU structure.
11451 * @param cbInstr The instruction length in bytes.
11452 * @param iCrReg The control register number (destination).
11453 * @param iGReg The general purpose register number (source).
11454 *
11455 * @remarks In ring-0 not all of the state needs to be synced in.
11456 */
11457VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11458{
11459 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11460 Assert(iCrReg < 16);
11461 Assert(iGReg < 16);
11462
11463 PIEMCPU pIemCpu = &pVCpu->iem.s;
11464 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11465 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11466 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11467}
11468
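/*
 * Added usage sketch (illustrative only): after decoding a MOV CRx,GReg exit,
 * HM can emulate the access without re-decoding the instruction.  iCrReg and
 * iGReg are hypothetical locals holding values taken from the exit
 * qualification, cbInstr from the exit instruction length field.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
 *      // On VINF_SUCCESS the guest RIP has already been advanced by the emulation.
 * @endcode
 */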
11469
11470/**
11471 * Interface for HM and EM to read from a CRx register.
11472 *
11473 * @returns Strict VBox status code.
11474 * @param pVCpu The cross context per virtual CPU structure.
11475 * @param cbInstr The instruction length in bytes.
11476 * @param iGReg The general purpose register number (destination).
11477 * @param iCrReg The control register number (source).
11478 *
11479 * @remarks In ring-0 not all of the state needs to be synced in.
11480 */
11481VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11482{
11483 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11484 Assert(iCrReg < 16);
11485 Assert(iGReg < 16);
11486
11487 PIEMCPU pIemCpu = &pVCpu->iem.s;
11488 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11489 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11490 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11491}
11492
11493
11494/**
11495 * Interface for HM and EM to clear the CR0[TS] bit.
11496 *
11497 * @returns Strict VBox status code.
11498 * @param pVCpu The cross context per virtual CPU structure.
11499 * @param cbInstr The instruction length in bytes.
11500 *
11501 * @remarks In ring-0 not all of the state needs to be synced in.
11502 */
11503VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11504{
11505 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11506
11507 PIEMCPU pIemCpu = &pVCpu->iem.s;
11508 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11509 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11510 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11511}
11512
11513
11514/**
11515 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11516 *
11517 * @returns Strict VBox status code.
11518 * @param pVCpu The cross context per virtual CPU structure.
11519 * @param cbInstr The instruction length in bytes.
11520 * @param uValue The value to load into CR0.
11521 *
11522 * @remarks In ring-0 not all of the state needs to be synced in.
11523 */
11524VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11525{
11526 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11527
11528 PIEMCPU pIemCpu = &pVCpu->iem.s;
11529 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11530 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11531 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11532}
11533
11534
11535/**
11536 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11537 *
11538 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11539 *
11540 * @returns Strict VBox status code.
11541 * @param pVCpu The cross context per virtual CPU structure of the
11542 * calling EMT.
11543 * @param cbInstr The instruction length in bytes.
11544 * @remarks In ring-0 not all of the state needs to be synced in.
11545 * @threads EMT(pVCpu)
11546 */
11547VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11548{
11549 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11550
11551 PIEMCPU pIemCpu = &pVCpu->iem.s;
11552 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11553 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11554 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11555}
11556