VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@56258

Last change on this file since 56258 was 56258, checked in by vboxsync, 10 years ago

IEM: Don't handle rIP specially in 16-bit mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 436.5 KB
1/* $Id: IEMAll.cpp 56258 2015-06-05 11:50:26Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
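/*
 * A quick, illustrative way of getting this output at runtime (assumes a
 * build with logging enabled; the destination below is just an example):
 *
 *      VBOX_LOG="+iem.e.l.f.l3"            - level 1, flow and level 3 for the IEM group
 *      VBOX_LOG_DEST="file=/tmp/iem.log"   - illustrative log destination
 */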
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78#define IEM_IMPLEMENTS_TASKSWITCH
79
80/*******************************************************************************
81* Header Files *
82*******************************************************************************/
83#define LOG_GROUP LOG_GROUP_IEM
84#include <VBox/vmm/iem.h>
85#include <VBox/vmm/cpum.h>
86#include <VBox/vmm/pdm.h>
87#include <VBox/vmm/pgm.h>
88#include <internal/pgm.h>
89#include <VBox/vmm/iom.h>
90#include <VBox/vmm/em.h>
91#include <VBox/vmm/hm.h>
92#include <VBox/vmm/tm.h>
93#include <VBox/vmm/dbgf.h>
94#include <VBox/vmm/dbgftrace.h>
95#ifdef VBOX_WITH_RAW_MODE_NOT_R0
96# include <VBox/vmm/patm.h>
97# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
98# include <VBox/vmm/csam.h>
99# endif
100#endif
101#include "IEMInternal.h"
102#ifdef IEM_VERIFICATION_MODE_FULL
103# include <VBox/vmm/rem.h>
104# include <VBox/vmm/mm.h>
105#endif
106#include <VBox/vmm/vm.h>
107#include <VBox/log.h>
108#include <VBox/err.h>
109#include <VBox/param.h>
110#include <VBox/dis.h>
111#include <VBox/disopcode.h>
112#include <iprt/assert.h>
113#include <iprt/string.h>
114#include <iprt/x86.h>
115
116
117
118/*******************************************************************************
119* Structures and Typedefs *
120*******************************************************************************/
121/** @typedef PFNIEMOP
122 * Pointer to an opcode decoder function.
123 */
124
125/** @def FNIEMOP_DEF
126 * Define an opcode decoder function.
127 *
128 * We're using macros for this so that adding and removing parameters as well as
129 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
130 *
131 * @param a_Name The function name.
132 */
133
134
135#if defined(__GNUC__) && defined(RT_ARCH_X86)
136typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
137# define FNIEMOP_DEF(a_Name) \
138 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
139# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
140 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
141# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
142 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
143
144#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
145typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
146# define FNIEMOP_DEF(a_Name) \
147 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
148# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
149 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
150# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
151 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
152
153#elif defined(__GNUC__)
154typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#else
163typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
170
171#endif
172
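/*
 * Illustrative sketch of how these macros are meant to be used; the handler
 * and the rIP helper named here are hypothetical, only FNIEMOP_DEF and
 * FNIEMOP_CALL themselves are real:
 *
 *      FNIEMOP_DEF(iemOp_example_nop)
 *      {
 *          iemRegAddToRip(pIemCpu, 1);     // assumed helper: advance rIP past the opcode
 *          return VINF_SUCCESS;
 *      }
 *
 *      // ... later, in the decoder loop:
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */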
173
174/**
175 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
176 */
177typedef union IEMSELDESC
178{
179 /** The legacy view. */
180 X86DESC Legacy;
181 /** The long mode view. */
182 X86DESC64 Long;
183} IEMSELDESC;
184/** Pointer to a selector descriptor table entry. */
185typedef IEMSELDESC *PIEMSELDESC;
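/*
 * Illustrative use together with iemMemFetchSelDesc() (forward declared
 * further down); hypothetical snippet, most error handling elided:
 *
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!Desc.Legacy.Gen.u1Present)
 *          return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
 */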
186
187
188/*******************************************************************************
189* Defined Constants And Macros *
190*******************************************************************************/
191/** Temporary hack to disable the double execution. Will be removed in favor
192 * of a dedicated execution mode in EM. */
193//#define IEM_VERIFICATION_MODE_NO_REM
194
195/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
196 * due to GCC lacking knowledge about the value range of a switch. */
197#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
198
199/**
200 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
201 * occasion.
202 */
203#ifdef LOG_ENABLED
204# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
205 do { \
206 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
207 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
208 } while (0)
209#else
210# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
212#endif
213
214/**
215 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
216 * occasion using the supplied logger statement.
217 *
218 * @param a_LoggerArgs What to log on failure.
219 */
220#ifdef LOG_ENABLED
221# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
222 do { \
223 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
224 /*LogFunc(a_LoggerArgs);*/ \
225 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
226 } while (0)
227#else
228# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
230#endif
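/*
 * Typical use is to bail out of a decoder or instruction implementation when
 * an aspect IEM does not handle yet is hit; the condition below is purely
 * hypothetical:
 *
 *      if (pCtx->cr0 & X86_CR0_TS)
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("CR0.TS set is not handled here\n"));
 */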
231
232/**
233 * Call an opcode decoder function.
234 *
235 * We're using macros for this so that adding and removing parameters can be
236 * done as we please. See FNIEMOP_DEF.
237 */
238#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
239
240/**
241 * Call a common opcode decoder function taking one extra argument.
242 *
243 * We're using macros for this so that adding and removing parameters can be
244 * done as we please. See FNIEMOP_DEF_1.
245 */
246#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
247
248/**
249 * Call a common opcode decoder function taking two extra arguments.
250 *
251 * We're using macros for this so that adding and removing parameters can be
252 * done as we please. See FNIEMOP_DEF_2.
253 */
254#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
255
256/**
257 * Check if we're currently executing in real or virtual 8086 mode.
258 *
259 * @returns @c true if it is, @c false if not.
260 * @param a_pIemCpu The IEM state of the current CPU.
261 */
262#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
263
264/**
265 * Check if we're currently executing in virtual 8086 mode.
266 *
267 * @returns @c true if it is, @c false if not.
268 * @param a_pIemCpu The IEM state of the current CPU.
269 */
270#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
271
272/**
273 * Check if we're currently executing in long mode.
274 *
275 * @returns @c true if it is, @c false if not.
276 * @param a_pIemCpu The IEM state of the current CPU.
277 */
278#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
279
280/**
281 * Check if we're currently executing in real mode.
282 *
283 * @returns @c true if it is, @c false if not.
284 * @param a_pIemCpu The IEM state of the current CPU.
285 */
286#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
287
288/**
289 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
290 * @returns PCCPUMFEATURES
291 * @param a_pIemCpu The IEM state of the current CPU.
292 */
293#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
294
295/**
296 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
297 * @returns PCCPUMFEATURES
298 * @param a_pIemCpu The IEM state of the current CPU.
299 */
300#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
301
302/**
303 * Evaluates to true if we're presenting an Intel CPU to the guest.
304 */
305#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
306
307/**
308 * Evaluates to true if we're presenting an AMD CPU to the guest.
309 */
310#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
311
312/**
313 * Check if the address is canonical.
314 */
315#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
316
317
318/*******************************************************************************
319* Global Variables *
320*******************************************************************************/
321extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
322
323
324/** Function table for the ADD instruction. */
325IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
326{
327 iemAImpl_add_u8, iemAImpl_add_u8_locked,
328 iemAImpl_add_u16, iemAImpl_add_u16_locked,
329 iemAImpl_add_u32, iemAImpl_add_u32_locked,
330 iemAImpl_add_u64, iemAImpl_add_u64_locked
331};
332
333/** Function table for the ADC instruction. */
334IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
335{
336 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
337 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
338 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
339 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
340};
341
342/** Function table for the SUB instruction. */
343IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
344{
345 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
346 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
347 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
348 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
349};
350
351/** Function table for the SBB instruction. */
352IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
353{
354 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
355 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
356 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
357 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
358};
359
360/** Function table for the OR instruction. */
361IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
362{
363 iemAImpl_or_u8, iemAImpl_or_u8_locked,
364 iemAImpl_or_u16, iemAImpl_or_u16_locked,
365 iemAImpl_or_u32, iemAImpl_or_u32_locked,
366 iemAImpl_or_u64, iemAImpl_or_u64_locked
367};
368
369/** Function table for the XOR instruction. */
370IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
371{
372 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
373 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
374 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
375 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
376};
377
378/** Function table for the AND instruction. */
379IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
380{
381 iemAImpl_and_u8, iemAImpl_and_u8_locked,
382 iemAImpl_and_u16, iemAImpl_and_u16_locked,
383 iemAImpl_and_u32, iemAImpl_and_u32_locked,
384 iemAImpl_and_u64, iemAImpl_and_u64_locked
385};
386
387/** Function table for the CMP instruction.
388 * @remarks Making operand order ASSUMPTIONS.
389 */
390IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
391{
392 iemAImpl_cmp_u8, NULL,
393 iemAImpl_cmp_u16, NULL,
394 iemAImpl_cmp_u32, NULL,
395 iemAImpl_cmp_u64, NULL
396};
397
398/** Function table for the TEST instruction.
399 * @remarks Making operand order ASSUMPTIONS.
400 */
401IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
402{
403 iemAImpl_test_u8, NULL,
404 iemAImpl_test_u16, NULL,
405 iemAImpl_test_u32, NULL,
406 iemAImpl_test_u64, NULL
407};
408
409/** Function table for the BT instruction. */
410IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
411{
412 NULL, NULL,
413 iemAImpl_bt_u16, NULL,
414 iemAImpl_bt_u32, NULL,
415 iemAImpl_bt_u64, NULL
416};
417
418/** Function table for the BTC instruction. */
419IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
420{
421 NULL, NULL,
422 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
423 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
424 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
425};
426
427/** Function table for the BTR instruction. */
428IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
429{
430 NULL, NULL,
431 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
432 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
433 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
434};
435
436/** Function table for the BTS instruction. */
437IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
438{
439 NULL, NULL,
440 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
441 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
442 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
443};
444
445/** Function table for the BSF instruction. */
446IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
447{
448 NULL, NULL,
449 iemAImpl_bsf_u16, NULL,
450 iemAImpl_bsf_u32, NULL,
451 iemAImpl_bsf_u64, NULL
452};
453
454/** Function table for the BSR instruction. */
455IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
456{
457 NULL, NULL,
458 iemAImpl_bsr_u16, NULL,
459 iemAImpl_bsr_u32, NULL,
460 iemAImpl_bsr_u64, NULL
461};
462
463/** Function table for the IMUL instruction. */
464IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
465{
466 NULL, NULL,
467 iemAImpl_imul_two_u16, NULL,
468 iemAImpl_imul_two_u32, NULL,
469 iemAImpl_imul_two_u64, NULL
470};
471
472/** Group 1 /r lookup table. */
473IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
474{
475 &g_iemAImpl_add,
476 &g_iemAImpl_or,
477 &g_iemAImpl_adc,
478 &g_iemAImpl_sbb,
479 &g_iemAImpl_and,
480 &g_iemAImpl_sub,
481 &g_iemAImpl_xor,
482 &g_iemAImpl_cmp
483};
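/*
 * The table is indexed by the ModR/M reg field, matching the group 1
 * encoding: /0=ADD, /1=OR, /2=ADC, /3=SBB, /4=AND, /5=SUB, /6=XOR, /7=CMP.
 * Illustrative sketch of how a group 1 decoder might pick an implementation:
 *
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */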
484
485/** Function table for the INC instruction. */
486IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
487{
488 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
489 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
490 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
491 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
492};
493
494/** Function table for the DEC instruction. */
495IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
496{
497 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
498 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
499 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
500 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
501};
502
503/** Function table for the NEG instruction. */
504IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
505{
506 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
507 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
508 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
509 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
510};
511
512/** Function table for the NOT instruction. */
513IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
514{
515 iemAImpl_not_u8, iemAImpl_not_u8_locked,
516 iemAImpl_not_u16, iemAImpl_not_u16_locked,
517 iemAImpl_not_u32, iemAImpl_not_u32_locked,
518 iemAImpl_not_u64, iemAImpl_not_u64_locked
519};
520
521
522/** Function table for the ROL instruction. */
523IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
524{
525 iemAImpl_rol_u8,
526 iemAImpl_rol_u16,
527 iemAImpl_rol_u32,
528 iemAImpl_rol_u64
529};
530
531/** Function table for the ROR instruction. */
532IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
533{
534 iemAImpl_ror_u8,
535 iemAImpl_ror_u16,
536 iemAImpl_ror_u32,
537 iemAImpl_ror_u64
538};
539
540/** Function table for the RCL instruction. */
541IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
542{
543 iemAImpl_rcl_u8,
544 iemAImpl_rcl_u16,
545 iemAImpl_rcl_u32,
546 iemAImpl_rcl_u64
547};
548
549/** Function table for the RCR instruction. */
550IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
551{
552 iemAImpl_rcr_u8,
553 iemAImpl_rcr_u16,
554 iemAImpl_rcr_u32,
555 iemAImpl_rcr_u64
556};
557
558/** Function table for the SHL instruction. */
559IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
560{
561 iemAImpl_shl_u8,
562 iemAImpl_shl_u16,
563 iemAImpl_shl_u32,
564 iemAImpl_shl_u64
565};
566
567/** Function table for the SHR instruction. */
568IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
569{
570 iemAImpl_shr_u8,
571 iemAImpl_shr_u16,
572 iemAImpl_shr_u32,
573 iemAImpl_shr_u64
574};
575
576/** Function table for the SAR instruction. */
577IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
578{
579 iemAImpl_sar_u8,
580 iemAImpl_sar_u16,
581 iemAImpl_sar_u32,
582 iemAImpl_sar_u64
583};
584
585
586/** Function table for the MUL instruction. */
587IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
588{
589 iemAImpl_mul_u8,
590 iemAImpl_mul_u16,
591 iemAImpl_mul_u32,
592 iemAImpl_mul_u64
593};
594
595/** Function table for the IMUL instruction working implicitly on rAX. */
596IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
597{
598 iemAImpl_imul_u8,
599 iemAImpl_imul_u16,
600 iemAImpl_imul_u32,
601 iemAImpl_imul_u64
602};
603
604/** Function table for the DIV instruction. */
605IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
606{
607 iemAImpl_div_u8,
608 iemAImpl_div_u16,
609 iemAImpl_div_u32,
610 iemAImpl_div_u64
611};
612
613/** Function table for the IDIV instruction. */
614IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
615{
616 iemAImpl_idiv_u8,
617 iemAImpl_idiv_u16,
618 iemAImpl_idiv_u32,
619 iemAImpl_idiv_u64
620};
621
622/** Function table for the SHLD instruction */
623IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
624{
625 iemAImpl_shld_u16,
626 iemAImpl_shld_u32,
627 iemAImpl_shld_u64,
628};
629
630/** Function table for the SHRD instruction */
631IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
632{
633 iemAImpl_shrd_u16,
634 iemAImpl_shrd_u32,
635 iemAImpl_shrd_u64,
636};
637
638
639/** Function table for the PUNPCKLBW instruction */
640IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
641/** Function table for the PUNPCKLWD instruction */
642IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
643/** Function table for the PUNPCKLDQ instruction */
644IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
645/** Function table for the PUNPCKLQDQ instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
647
648/** Function table for the PUNPCKHBW instruction */
649IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
650/** Function table for the PUNPCKHWD instruction */
651IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
652/** Function table for the PUNPCKHDQ instruction */
653IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
654/** Function table for the PUNPCKHQDQ instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
656
657/** Function table for the PXOR instruction */
658IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
659/** Function table for the PCMPEQB instruction */
660IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
661/** Function table for the PCMPEQW instruction */
662IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
663/** Function table for the PCMPEQD instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
665
666
667#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
668/** What IEM just wrote. */
669uint8_t g_abIemWrote[256];
670/** How much IEM just wrote. */
671size_t g_cbIemWrote;
672#endif
673
674
675/*******************************************************************************
676* Internal Functions *
677*******************************************************************************/
678IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
679IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
680IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
681IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
682/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
683IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
684IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
685IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
687IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
688IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
689IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
691IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
692IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
693IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
694IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
695IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
696IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
698IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
699IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
700IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
701IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
705IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
706IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
707IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
708IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
709IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
710IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
711
712#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
713IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
714#endif
715IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
716IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
717
718
719
720/**
721 * Sets the pass up status.
722 *
723 * @returns VINF_SUCCESS.
724 * @param pIemCpu The per CPU IEM state of the calling thread.
725 * @param rcPassUp The pass up status. Must be informational.
726 * VINF_SUCCESS is not allowed.
727 */
728IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
729{
730 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
731
732 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
733 if (rcOldPassUp == VINF_SUCCESS)
734 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
735 /* If both are EM scheduling codes, use EM priority rules. */
736 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
737 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
738 {
739 if (rcPassUp < rcOldPassUp)
740 {
741 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
742 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
743 }
744 else
745 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 }
747 /* Override EM scheduling with specific status code. */
748 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
749 {
750 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
751 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
752 }
753 /* Don't override specific status code, first come first served. */
754 else
755 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
756 return VINF_SUCCESS;
757}
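/*
 * Illustrative sketch of how a memory accessor typically feeds this function
 * (it mirrors the pattern used by the opcode prefetchers further down):
 *
 *      VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cb, PGMACCESSORIGIN_IEM);
 *      if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *          rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); // remember it, carry on as success
 */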
758
759
760/**
761 * Initializes the execution state.
762 *
763 * @param pIemCpu The per CPU IEM state.
764 * @param fBypassHandlers Whether to bypass access handlers.
765 */
766DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
767{
768 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
769 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
770
771#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
772 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
773 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
774 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
775 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
776 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
778 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
780#endif
781
782#ifdef VBOX_WITH_RAW_MODE_NOT_R0
783 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
784#endif
785 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
786 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
787 ? IEMMODE_64BIT
788 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
789 ? IEMMODE_32BIT
790 : IEMMODE_16BIT;
791 pIemCpu->enmCpuMode = enmMode;
792#ifdef VBOX_STRICT
793 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
794 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
795 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
796 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
797 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
798 pIemCpu->uRexReg = 127;
799 pIemCpu->uRexB = 127;
800 pIemCpu->uRexIndex = 127;
801 pIemCpu->iEffSeg = 127;
802 pIemCpu->offOpcode = 127;
803 pIemCpu->cbOpcode = 127;
804#endif
805
806 pIemCpu->cActiveMappings = 0;
807 pIemCpu->iNextMapping = 0;
808 pIemCpu->rcPassUp = VINF_SUCCESS;
809 pIemCpu->fBypassHandlers = fBypassHandlers;
810#ifdef VBOX_WITH_RAW_MODE_NOT_R0
811 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
812 && pCtx->cs.u64Base == 0
813 && pCtx->cs.u32Limit == UINT32_MAX
814 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
815 if (!pIemCpu->fInPatchCode)
816 CPUMRawLeave(pVCpu, VINF_SUCCESS);
817#endif
818}
819
820
821/**
822 * Initializes the decoder state.
823 *
824 * @param pIemCpu The per CPU IEM state.
825 * @param fBypassHandlers Whether to bypass access handlers.
826 */
827DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
828{
829 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
830 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
831
832#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
833 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
834 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
835 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
836 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
837 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
839 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
840 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
841#endif
842
843#ifdef VBOX_WITH_RAW_MODE_NOT_R0
844 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
845#endif
846 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
847#ifdef IEM_VERIFICATION_MODE_FULL
848 if (pIemCpu->uInjectCpl != UINT8_MAX)
849 pIemCpu->uCpl = pIemCpu->uInjectCpl;
850#endif
851 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
852 ? IEMMODE_64BIT
853 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
854 ? IEMMODE_32BIT
855 : IEMMODE_16BIT;
856 pIemCpu->enmCpuMode = enmMode;
857 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
858 pIemCpu->enmEffAddrMode = enmMode;
859 if (enmMode != IEMMODE_64BIT)
860 {
861 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
862 pIemCpu->enmEffOpSize = enmMode;
863 }
864 else
865 {
866 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
867 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
868 }
869 pIemCpu->fPrefixes = 0;
870 pIemCpu->uRexReg = 0;
871 pIemCpu->uRexB = 0;
872 pIemCpu->uRexIndex = 0;
873 pIemCpu->iEffSeg = X86_SREG_DS;
874 pIemCpu->offOpcode = 0;
875 pIemCpu->cbOpcode = 0;
876 pIemCpu->cActiveMappings = 0;
877 pIemCpu->iNextMapping = 0;
878 pIemCpu->rcPassUp = VINF_SUCCESS;
879 pIemCpu->fBypassHandlers = fBypassHandlers;
880#ifdef VBOX_WITH_RAW_MODE_NOT_R0
881 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
882 && pCtx->cs.u64Base == 0
883 && pCtx->cs.u32Limit == UINT32_MAX
884 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
885 if (!pIemCpu->fInPatchCode)
886 CPUMRawLeave(pVCpu, VINF_SUCCESS);
887#endif
888
889#ifdef DBGFTRACE_ENABLED
890 switch (enmMode)
891 {
892 case IEMMODE_64BIT:
893 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
894 break;
895 case IEMMODE_32BIT:
896 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
897 break;
898 case IEMMODE_16BIT:
899 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
900 break;
901 }
902#endif
903}
904
905
906/**
907 * Prefetches opcodes the first time when starting execution.
908 *
909 * @returns Strict VBox status code.
910 * @param pIemCpu The IEM state.
911 * @param fBypassHandlers Whether to bypass access handlers.
912 */
913IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
914{
915#ifdef IEM_VERIFICATION_MODE_FULL
916 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
917#endif
918 iemInitDecoder(pIemCpu, fBypassHandlers);
919
920 /*
921 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
922 *
923 * First translate CS:rIP to a physical address.
924 */
925 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
926 uint32_t cbToTryRead;
927 RTGCPTR GCPtrPC;
928 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
929 {
930 cbToTryRead = PAGE_SIZE;
931 GCPtrPC = pCtx->rip;
932 if (!IEM_IS_CANONICAL(GCPtrPC))
933 return iemRaiseGeneralProtectionFault0(pIemCpu);
934 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
935 }
936 else
937 {
938 uint32_t GCPtrPC32 = pCtx->eip;
939 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
940 if (GCPtrPC32 > pCtx->cs.u32Limit)
941 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
942 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
943 if (!cbToTryRead) /* overflowed */
944 {
945 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
946 cbToTryRead = UINT32_MAX;
947 }
948 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
949 Assert(GCPtrPC <= UINT32_MAX);
950 }
951
952#ifdef VBOX_WITH_RAW_MODE_NOT_R0
953 /* Allow interpretation of patch manager code blocks since they can for
954 instance throw #PFs for perfectly good reasons. */
955 if (pIemCpu->fInPatchCode)
956 {
957 size_t cbRead = 0;
958 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
959 AssertRCReturn(rc, rc);
960 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
961 return VINF_SUCCESS;
962 }
963#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
964
965 RTGCPHYS GCPhys;
966 uint64_t fFlags;
967 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
968 if (RT_FAILURE(rc))
969 {
970 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
971 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
972 }
973 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
974 {
975 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
976 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
977 }
978 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
979 {
980 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
981 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
982 }
983 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
984 /** @todo Check reserved bits and such stuff. PGM is better at doing
985 * that, so do it when implementing the guest virtual address
986 * TLB... */
987
988#ifdef IEM_VERIFICATION_MODE_FULL
989 /*
990 * Optimistic optimization: Use unconsumed opcode bytes from the previous
991 * instruction.
992 */
993 /** @todo optimize this differently by not using PGMPhysRead. */
994 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
995 pIemCpu->GCPhysOpcodes = GCPhys;
996 if ( offPrevOpcodes < cbOldOpcodes
997 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
998 {
999 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1000 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1001 pIemCpu->cbOpcode = cbNew;
1002 return VINF_SUCCESS;
1003 }
1004#endif
1005
1006 /*
1007 * Read the bytes at this address.
1008 */
1009 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1010#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1011 size_t cbActual;
1012 if ( PATMIsEnabled(pVM)
1013 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1014 {
1015 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1016 Assert(cbActual > 0);
1017 pIemCpu->cbOpcode = (uint8_t)cbActual;
1018 }
1019 else
1020#endif
1021 {
1022 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1023 if (cbToTryRead > cbLeftOnPage)
1024 cbToTryRead = cbLeftOnPage;
1025 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1026 cbToTryRead = sizeof(pIemCpu->abOpcode);
1027
1028 if (!pIemCpu->fBypassHandlers)
1029 {
1030 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1031 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1032 { /* likely */ }
1033 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1034 {
1035 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1036 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1037 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1038 }
1039 else
1040 {
1041 Log((RT_SUCCESS(rcStrict)
1042 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1043 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1044 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1045 return rcStrict;
1046 }
1047 }
1048 else
1049 {
1050 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1051 if (RT_SUCCESS(rc))
1052 { /* likely */ }
1053 else
1054 {
1055 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1056 GCPtrPC, GCPhys, cbToTryRead, rc));
1057 return rc;
1058 }
1059 }
1060 pIemCpu->cbOpcode = cbToTryRead;
1061 }
1062
1063 return VINF_SUCCESS;
1064}
1065
1066
1067/**
1068 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1069 * exception if it fails.
1070 *
1071 * @returns Strict VBox status code.
1072 * @param pIemCpu The IEM state.
1073 * @param cbMin The minimum number of bytes relative to offOpcode
1074 * that must be read.
1075 */
1076IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1077{
1078 /*
1079 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1080 *
1081 * First translate CS:rIP to a physical address.
1082 */
1083 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1084 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1085 uint32_t cbToTryRead;
1086 RTGCPTR GCPtrNext;
1087 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1088 {
1089 cbToTryRead = PAGE_SIZE;
1090 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1091 if (!IEM_IS_CANONICAL(GCPtrNext))
1092 return iemRaiseGeneralProtectionFault0(pIemCpu);
1093 }
1094 else
1095 {
1096 uint32_t GCPtrNext32 = pCtx->eip;
1097 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1098 GCPtrNext32 += pIemCpu->cbOpcode;
1099 if (GCPtrNext32 > pCtx->cs.u32Limit)
1100 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1101 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1102 if (!cbToTryRead) /* overflowed */
1103 {
1104 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1105 cbToTryRead = UINT32_MAX;
1106 /** @todo check out wrapping around the code segment. */
1107 }
1108 if (cbToTryRead < cbMin - cbLeft)
1109 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1110 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1111 }
1112
1113 /* Only read up to the end of the page, and make sure we don't read more
1114 than the opcode buffer can hold. */
1115 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1116 if (cbToTryRead > cbLeftOnPage)
1117 cbToTryRead = cbLeftOnPage;
1118 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1119 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1120/** @todo r=bird: Convert assertion into undefined opcode exception? */
1121 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1122
1123#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1124 /* Allow interpretation of patch manager code blocks since they can for
1125 instance throw #PFs for perfectly good reasons. */
1126 if (pIemCpu->fInPatchCode)
1127 {
1128 size_t cbRead = 0;
1129 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1130 AssertRCReturn(rc, rc);
1131 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1132 return VINF_SUCCESS;
1133 }
1134#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1135
1136 RTGCPHYS GCPhys;
1137 uint64_t fFlags;
1138 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1139 if (RT_FAILURE(rc))
1140 {
1141 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1142 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1143 }
1144 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1145 {
1146 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1147 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1148 }
1149 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1150 {
1151 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1152 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1153 }
1154 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1155 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1156 /** @todo Check reserved bits and such stuff. PGM is better at doing
1157 * that, so do it when implementing the guest virtual address
1158 * TLB... */
1159
1160 /*
1161 * Read the bytes at this address.
1162 *
1163 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1164 * and since PATM should only patch the start of an instruction there
1165 * should be no need to check again here.
1166 */
1167 if (!pIemCpu->fBypassHandlers)
1168 {
1169 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1170 cbToTryRead, PGMACCESSORIGIN_IEM);
1171 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1172 { /* likely */ }
1173 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1174 {
1175 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1176 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1177 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1178 }
1179 else
1180 {
1181 Log((RT_SUCCESS(rcStrict)
1182 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1183 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1184 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1185 return rcStrict;
1186 }
1187 }
1188 else
1189 {
1190 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1191 if (RT_SUCCESS(rc))
1192 { /* likely */ }
1193 else
1194 {
1195 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1196 return rc;
1197 }
1198 }
1199 pIemCpu->cbOpcode += cbToTryRead;
1200 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1201
1202 return VINF_SUCCESS;
1203}
1204
1205
1206/**
1207 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1208 *
1209 * @returns Strict VBox status code.
1210 * @param pIemCpu The IEM state.
1211 * @param pb Where to return the opcode byte.
1212 */
1213DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1214{
1215 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1216 if (rcStrict == VINF_SUCCESS)
1217 {
1218 uint8_t offOpcode = pIemCpu->offOpcode;
1219 *pb = pIemCpu->abOpcode[offOpcode];
1220 pIemCpu->offOpcode = offOpcode + 1;
1221 }
1222 else
1223 *pb = 0;
1224 return rcStrict;
1225}
1226
1227
1228/**
1229 * Fetches the next opcode byte.
1230 *
1231 * @returns Strict VBox status code.
1232 * @param pIemCpu The IEM state.
1233 * @param pu8 Where to return the opcode byte.
1234 */
1235DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1236{
1237 uint8_t const offOpcode = pIemCpu->offOpcode;
1238 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1239 {
1240 *pu8 = pIemCpu->abOpcode[offOpcode];
1241 pIemCpu->offOpcode = offOpcode + 1;
1242 return VINF_SUCCESS;
1243 }
1244 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1245}
1246
1247
1248/**
1249 * Fetches the next opcode byte, returns automatically on failure.
1250 *
1251 * @param a_pu8 Where to return the opcode byte.
1252 * @remark Implicitly references pIemCpu.
1253 */
1254#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1255 do \
1256 { \
1257 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1258 if (rcStrict2 != VINF_SUCCESS) \
1259 return rcStrict2; \
1260 } while (0)
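/*
 * Note that on a fetch failure the macro returns from the function it is
 * used in, so it can only appear in functions returning VBOXSTRICTRC.
 * Hypothetical handler sketch:
 *
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          uint8_t bRm;
 *          IEM_OPCODE_GET_NEXT_U8(&bRm);   // propagates any fetch error to our caller
 *          // ... decode using bRm ...
 *          return VINF_SUCCESS;
 *      }
 */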
1261
1262
1263/**
1264 * Fetches the next signed byte from the opcode stream.
1265 *
1266 * @returns Strict VBox status code.
1267 * @param pIemCpu The IEM state.
1268 * @param pi8 Where to return the signed byte.
1269 */
1270DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1271{
1272 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1273}
1274
1275
1276/**
1277 * Fetches the next signed byte from the opcode stream, returning automatically
1278 * on failure.
1279 *
1280 * @param a_pi8 Where to return the signed byte.
1281 * @remark Implicitly references pIemCpu.
1282 */
1283#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1284 do \
1285 { \
1286 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1287 if (rcStrict2 != VINF_SUCCESS) \
1288 return rcStrict2; \
1289 } while (0)
1290
1291
1292/**
1293 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1294 *
1295 * @returns Strict VBox status code.
1296 * @param pIemCpu The IEM state.
1297 * @param pu16 Where to return the opcode word.
1298 */
1299DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1300{
1301 uint8_t u8;
1302 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1303 if (rcStrict == VINF_SUCCESS)
1304 *pu16 = (int8_t)u8;
1305 return rcStrict;
1306}
1307
1308
1309/**
1310 * Fetches the next signed byte from the opcode stream, extending it to
1311 * unsigned 16-bit.
1312 *
1313 * @returns Strict VBox status code.
1314 * @param pIemCpu The IEM state.
1315 * @param pu16 Where to return the unsigned word.
1316 */
1317DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1318{
1319 uint8_t const offOpcode = pIemCpu->offOpcode;
1320 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1321 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1322
1323 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1324 pIemCpu->offOpcode = offOpcode + 1;
1325 return VINF_SUCCESS;
1326}
1327
1328
1329/**
1330 * Fetches the next signed byte from the opcode stream and sign-extends it to
1331 * a word, returning automatically on failure.
1332 *
1333 * @param a_pu16 Where to return the word.
1334 * @remark Implicitly references pIemCpu.
1335 */
1336#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1337 do \
1338 { \
1339 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1340 if (rcStrict2 != VINF_SUCCESS) \
1341 return rcStrict2; \
1342 } while (0)
1343
1344
1345/**
1346 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1347 *
1348 * @returns Strict VBox status code.
1349 * @param pIemCpu The IEM state.
1350 * @param pu32 Where to return the opcode dword.
1351 */
1352DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1353{
1354 uint8_t u8;
1355 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1356 if (rcStrict == VINF_SUCCESS)
1357 *pu32 = (int8_t)u8;
1358 return rcStrict;
1359}
1360
1361
1362/**
1363 * Fetches the next signed byte from the opcode stream, extending it to
1364 * unsigned 32-bit.
1365 *
1366 * @returns Strict VBox status code.
1367 * @param pIemCpu The IEM state.
1368 * @param pu32 Where to return the unsigned dword.
1369 */
1370DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1371{
1372 uint8_t const offOpcode = pIemCpu->offOpcode;
1373 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1374 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1375
1376 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1377 pIemCpu->offOpcode = offOpcode + 1;
1378 return VINF_SUCCESS;
1379}
1380
1381
1382/**
1383 * Fetches the next signed byte from the opcode stream and sign-extends it to
1384 * a double word, returning automatically on failure.
1385 *
1386 * @param a_pu32 Where to return the double word.
1387 * @remark Implicitly references pIemCpu.
1388 */
1389#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1390 do \
1391 { \
1392 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1393 if (rcStrict2 != VINF_SUCCESS) \
1394 return rcStrict2; \
1395 } while (0)
1396
1397
1398/**
1399 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pIemCpu The IEM state.
1403 * @param pu64 Where to return the opcode qword.
1404 */
1405DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1406{
1407 uint8_t u8;
1408 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1409 if (rcStrict == VINF_SUCCESS)
1410 *pu64 = (int8_t)u8;
1411 return rcStrict;
1412}
1413
1414
1415/**
1416 * Fetches the next signed byte from the opcode stream, extending it to
1417 * unsigned 64-bit.
1418 *
1419 * @returns Strict VBox status code.
1420 * @param pIemCpu The IEM state.
1421 * @param pu64 Where to return the unsigned qword.
1422 */
1423DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1424{
1425 uint8_t const offOpcode = pIemCpu->offOpcode;
1426 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1427 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1428
1429 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1430 pIemCpu->offOpcode = offOpcode + 1;
1431 return VINF_SUCCESS;
1432}
1433
1434
1435/**
1436 * Fetches the next signed byte from the opcode stream and sign-extends it to
1437 * a quad word, returning automatically on failure.
1438 *
1439 * @param a_pu64 Where to return the quad word.
1440 * @remark Implicitly references pIemCpu.
1441 */
1442#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1443 do \
1444 { \
1445 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1446 if (rcStrict2 != VINF_SUCCESS) \
1447 return rcStrict2; \
1448 } while (0)
1449
1450
1451/**
1452 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1453 *
1454 * @returns Strict VBox status code.
1455 * @param pIemCpu The IEM state.
1456 * @param pu16 Where to return the opcode word.
1457 */
1458DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1459{
1460 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1461 if (rcStrict == VINF_SUCCESS)
1462 {
1463 uint8_t offOpcode = pIemCpu->offOpcode;
1464 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1465 pIemCpu->offOpcode = offOpcode + 2;
1466 }
1467 else
1468 *pu16 = 0;
1469 return rcStrict;
1470}
1471
1472
1473/**
1474 * Fetches the next opcode word.
1475 *
1476 * @returns Strict VBox status code.
1477 * @param pIemCpu The IEM state.
1478 * @param pu16 Where to return the opcode word.
1479 */
1480DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1481{
1482 uint8_t const offOpcode = pIemCpu->offOpcode;
1483 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1484 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1485
1486 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1487 pIemCpu->offOpcode = offOpcode + 2;
1488 return VINF_SUCCESS;
1489}
1490
1491
1492/**
1493 * Fetches the next opcode word, returns automatically on failure.
1494 *
1495 * @param a_pu16 Where to return the opcode word.
1496 * @remark Implicitly references pIemCpu.
1497 */
1498#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1499 do \
1500 { \
1501 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1502 if (rcStrict2 != VINF_SUCCESS) \
1503 return rcStrict2; \
1504 } while (0)
1505
1506
1507/**
1508 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1509 *
1510 * @returns Strict VBox status code.
1511 * @param pIemCpu The IEM state.
1512 * @param pu32 Where to return the opcode double word.
1513 */
1514DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1515{
1516 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1517 if (rcStrict == VINF_SUCCESS)
1518 {
1519 uint8_t offOpcode = pIemCpu->offOpcode;
1520 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1521 pIemCpu->offOpcode = offOpcode + 2;
1522 }
1523 else
1524 *pu32 = 0;
1525 return rcStrict;
1526}
1527
1528
1529/**
1530 * Fetches the next opcode word, zero extending it to a double word.
1531 *
1532 * @returns Strict VBox status code.
1533 * @param pIemCpu The IEM state.
1534 * @param pu32 Where to return the opcode double word.
1535 */
1536DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1537{
1538 uint8_t const offOpcode = pIemCpu->offOpcode;
1539 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1540 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1541
1542 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1543 pIemCpu->offOpcode = offOpcode + 2;
1544 return VINF_SUCCESS;
1545}
1546
1547
1548/**
1549 * Fetches the next opcode word and zero extends it to a double word, returns
1550 * automatically on failure.
1551 *
1552 * @param a_pu32 Where to return the opcode double word.
1553 * @remark Implicitly references pIemCpu.
1554 */
1555#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1556 do \
1557 { \
1558 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1559 if (rcStrict2 != VINF_SUCCESS) \
1560 return rcStrict2; \
1561 } while (0)
1562
1563
1564/**
1565 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1566 *
1567 * @returns Strict VBox status code.
1568 * @param pIemCpu The IEM state.
1569 * @param pu64 Where to return the opcode quad word.
1570 */
1571DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1572{
1573 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1574 if (rcStrict == VINF_SUCCESS)
1575 {
1576 uint8_t offOpcode = pIemCpu->offOpcode;
1577 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1578 pIemCpu->offOpcode = offOpcode + 2;
1579 }
1580 else
1581 *pu64 = 0;
1582 return rcStrict;
1583}
1584
1585
1586/**
1587 * Fetches the next opcode word, zero extending it to a quad word.
1588 *
1589 * @returns Strict VBox status code.
1590 * @param pIemCpu The IEM state.
1591 * @param pu64 Where to return the opcode quad word.
1592 */
1593DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1594{
1595 uint8_t const offOpcode = pIemCpu->offOpcode;
1596 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1597 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1598
1599 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1600 pIemCpu->offOpcode = offOpcode + 2;
1601 return VINF_SUCCESS;
1602}
1603
1604
1605/**
1606 * Fetches the next opcode word and zero extends it to a quad word, returns
1607 * automatically on failure.
1608 *
1609 * @param a_pu64 Where to return the opcode quad word.
1610 * @remark Implicitly references pIemCpu.
1611 */
1612#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1613 do \
1614 { \
1615 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1616 if (rcStrict2 != VINF_SUCCESS) \
1617 return rcStrict2; \
1618 } while (0)
1619
1620
1621/**
1622 * Fetches the next signed word from the opcode stream.
1623 *
1624 * @returns Strict VBox status code.
1625 * @param pIemCpu The IEM state.
1626 * @param pi16 Where to return the signed word.
1627 */
1628DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1629{
1630 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1631}
1632
1633
1634/**
1635 * Fetches the next signed word from the opcode stream, returning automatically
1636 * on failure.
1637 *
1638 * @param   a_pi16              Where to return the signed word.
1639 * @remark Implicitly references pIemCpu.
1640 */
1641#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1642 do \
1643 { \
1644 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1645 if (rcStrict2 != VINF_SUCCESS) \
1646 return rcStrict2; \
1647 } while (0)
1648
1649
1650/**
1651 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1652 *
1653 * @returns Strict VBox status code.
1654 * @param pIemCpu The IEM state.
1655 * @param pu32 Where to return the opcode dword.
1656 */
1657DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1658{
1659 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1660 if (rcStrict == VINF_SUCCESS)
1661 {
1662 uint8_t offOpcode = pIemCpu->offOpcode;
1663 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1664 pIemCpu->abOpcode[offOpcode + 1],
1665 pIemCpu->abOpcode[offOpcode + 2],
1666 pIemCpu->abOpcode[offOpcode + 3]);
1667 pIemCpu->offOpcode = offOpcode + 4;
1668 }
1669 else
1670 *pu32 = 0;
1671 return rcStrict;
1672}
1673
1674
1675/**
1676 * Fetches the next opcode dword.
1677 *
1678 * @returns Strict VBox status code.
1679 * @param pIemCpu The IEM state.
1680 * @param pu32 Where to return the opcode double word.
1681 */
1682DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1683{
1684 uint8_t const offOpcode = pIemCpu->offOpcode;
1685 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1686 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1687
1688 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1689 pIemCpu->abOpcode[offOpcode + 1],
1690 pIemCpu->abOpcode[offOpcode + 2],
1691 pIemCpu->abOpcode[offOpcode + 3]);
1692 pIemCpu->offOpcode = offOpcode + 4;
1693 return VINF_SUCCESS;
1694}
1695
1696
1697/**
1698 * Fetches the next opcode dword, returns automatically on failure.
1699 *
1700 * @param a_pu32 Where to return the opcode dword.
1701 * @remark Implicitly references pIemCpu.
1702 */
1703#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1704 do \
1705 { \
1706 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1707 if (rcStrict2 != VINF_SUCCESS) \
1708 return rcStrict2; \
1709 } while (0)
1710
1711
1712/**
1713 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1714 *
1715 * @returns Strict VBox status code.
1716 * @param pIemCpu The IEM state.
1717 * @param   pu64                Where to return the opcode quad word.
1718 */
1719DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1720{
1721 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1722 if (rcStrict == VINF_SUCCESS)
1723 {
1724 uint8_t offOpcode = pIemCpu->offOpcode;
1725 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1726 pIemCpu->abOpcode[offOpcode + 1],
1727 pIemCpu->abOpcode[offOpcode + 2],
1728 pIemCpu->abOpcode[offOpcode + 3]);
1729 pIemCpu->offOpcode = offOpcode + 4;
1730 }
1731 else
1732 *pu64 = 0;
1733 return rcStrict;
1734}
1735
1736
1737/**
1738 * Fetches the next opcode dword, zero extending it to a quad word.
1739 *
1740 * @returns Strict VBox status code.
1741 * @param pIemCpu The IEM state.
1742 * @param pu64 Where to return the opcode quad word.
1743 */
1744DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1745{
1746 uint8_t const offOpcode = pIemCpu->offOpcode;
1747 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1748 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1749
1750 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1751 pIemCpu->abOpcode[offOpcode + 1],
1752 pIemCpu->abOpcode[offOpcode + 2],
1753 pIemCpu->abOpcode[offOpcode + 3]);
1754 pIemCpu->offOpcode = offOpcode + 4;
1755 return VINF_SUCCESS;
1756}
1757
1758
1759/**
1760 * Fetches the next opcode dword and zero extends it to a quad word, returns
1761 * automatically on failure.
1762 *
1763 * @param a_pu64 Where to return the opcode quad word.
1764 * @remark Implicitly references pIemCpu.
1765 */
1766#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1767 do \
1768 { \
1769 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1770 if (rcStrict2 != VINF_SUCCESS) \
1771 return rcStrict2; \
1772 } while (0)
1773
1774
1775/**
1776 * Fetches the next signed double word from the opcode stream.
1777 *
1778 * @returns Strict VBox status code.
1779 * @param pIemCpu The IEM state.
1780 * @param pi32 Where to return the signed double word.
1781 */
1782DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1783{
1784 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1785}
1786
1787/**
1788 * Fetches the next signed double word from the opcode stream, returning
1789 * automatically on failure.
1790 *
1791 * @param   a_pi32              Where to return the signed double word.
1792 * @remark Implicitly references pIemCpu.
1793 */
1794#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1795 do \
1796 { \
1797 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1798 if (rcStrict2 != VINF_SUCCESS) \
1799 return rcStrict2; \
1800 } while (0)
1801
1802
1803/**
1804 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1805 *
1806 * @returns Strict VBox status code.
1807 * @param pIemCpu The IEM state.
1808 * @param pu64 Where to return the opcode qword.
1809 */
1810DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1811{
1812 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1813 if (rcStrict == VINF_SUCCESS)
1814 {
1815 uint8_t offOpcode = pIemCpu->offOpcode;
1816 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1817 pIemCpu->abOpcode[offOpcode + 1],
1818 pIemCpu->abOpcode[offOpcode + 2],
1819 pIemCpu->abOpcode[offOpcode + 3]);
1820 pIemCpu->offOpcode = offOpcode + 4;
1821 }
1822 else
1823 *pu64 = 0;
1824 return rcStrict;
1825}
1826
1827
1828/**
1829 * Fetches the next opcode dword, sign extending it into a quad word.
1830 *
1831 * @returns Strict VBox status code.
1832 * @param pIemCpu The IEM state.
1833 * @param pu64 Where to return the opcode quad word.
1834 */
1835DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1836{
1837 uint8_t const offOpcode = pIemCpu->offOpcode;
1838 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1839 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1840
1841 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1842 pIemCpu->abOpcode[offOpcode + 1],
1843 pIemCpu->abOpcode[offOpcode + 2],
1844 pIemCpu->abOpcode[offOpcode + 3]);
1845 *pu64 = i32;
1846 pIemCpu->offOpcode = offOpcode + 4;
1847 return VINF_SUCCESS;
1848}
1849
1850
1851/**
1852 * Fetches the next opcode double word and sign extends it to a quad word,
1853 * returns automatically on failure.
1854 *
1855 * @param a_pu64 Where to return the opcode quad word.
1856 * @remark Implicitly references pIemCpu.
1857 */
1858#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1859 do \
1860 { \
1861 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1862 if (rcStrict2 != VINF_SUCCESS) \
1863 return rcStrict2; \
1864 } while (0)
1865
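/*
 * Worked example of the sign extension performed by the S32->U64 fetchers
 * above: the opcode bytes 00 00 00 80 (little endian) assemble to the dword
 * 0x80000000, which as an int32_t is -2147483648 and therefore yields the
 * quad word 0xffffffff80000000.  This matches how 64-bit mode treats imm32
 * operands (e.g. MOV r/m64, imm32), which are sign-extended to 64 bits.
 */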
1866
1867/**
1868 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1869 *
1870 * @returns Strict VBox status code.
1871 * @param pIemCpu The IEM state.
1872 * @param pu64 Where to return the opcode qword.
1873 */
1874DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1875{
1876 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1877 if (rcStrict == VINF_SUCCESS)
1878 {
1879 uint8_t offOpcode = pIemCpu->offOpcode;
1880 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1881 pIemCpu->abOpcode[offOpcode + 1],
1882 pIemCpu->abOpcode[offOpcode + 2],
1883 pIemCpu->abOpcode[offOpcode + 3],
1884 pIemCpu->abOpcode[offOpcode + 4],
1885 pIemCpu->abOpcode[offOpcode + 5],
1886 pIemCpu->abOpcode[offOpcode + 6],
1887 pIemCpu->abOpcode[offOpcode + 7]);
1888 pIemCpu->offOpcode = offOpcode + 8;
1889 }
1890 else
1891 *pu64 = 0;
1892 return rcStrict;
1893}
1894
1895
1896/**
1897 * Fetches the next opcode qword.
1898 *
1899 * @returns Strict VBox status code.
1900 * @param pIemCpu The IEM state.
1901 * @param pu64 Where to return the opcode qword.
1902 */
1903DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1904{
1905 uint8_t const offOpcode = pIemCpu->offOpcode;
1906 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1907 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1908
1909 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1910 pIemCpu->abOpcode[offOpcode + 1],
1911 pIemCpu->abOpcode[offOpcode + 2],
1912 pIemCpu->abOpcode[offOpcode + 3],
1913 pIemCpu->abOpcode[offOpcode + 4],
1914 pIemCpu->abOpcode[offOpcode + 5],
1915 pIemCpu->abOpcode[offOpcode + 6],
1916 pIemCpu->abOpcode[offOpcode + 7]);
1917 pIemCpu->offOpcode = offOpcode + 8;
1918 return VINF_SUCCESS;
1919}
1920
1921
1922/**
1923 * Fetches the next opcode quad word, returns automatically on failure.
1924 *
1925 * @param a_pu64 Where to return the opcode quad word.
1926 * @remark Implicitly references pIemCpu.
1927 */
1928#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1929 do \
1930 { \
1931 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1932 if (rcStrict2 != VINF_SUCCESS) \
1933 return rcStrict2; \
1934 } while (0)
1935
1936
1937/** @name Misc Worker Functions.
1938 * @{
1939 */
1940
1941
1942/**
1943 * Validates a new SS segment.
1944 *
1945 * @returns VBox strict status code.
1946 * @param pIemCpu The IEM per CPU instance data.
1947 * @param pCtx The CPU context.
1948 * @param   NewSS               The new SS selector.
1949 * @param uCpl The CPL to load the stack for.
1950 * @param pDesc Where to return the descriptor.
1951 */
1952IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1953{
1954 NOREF(pCtx);
1955
1956 /* Null selectors are not allowed (we're not called for dispatching
1957 interrupts with SS=0 in long mode). */
1958 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1959 {
1960        Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1961 return iemRaiseTaskSwitchFault0(pIemCpu);
1962 }
1963
1964 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1965 if ((NewSS & X86_SEL_RPL) != uCpl)
1966 {
1967        Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1968 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1969 }
1970
1971 /*
1972 * Read the descriptor.
1973 */
1974 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1975 if (rcStrict != VINF_SUCCESS)
1976 return rcStrict;
1977
1978 /*
1979 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1980 */
1981 if (!pDesc->Legacy.Gen.u1DescType)
1982 {
1983        Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1984 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1985 }
1986
1987 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1988 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1989 {
1990 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1991 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1992 }
1993 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1994 {
1995        Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1996 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1997 }
1998
1999 /* Is it there? */
2000 /** @todo testcase: Is this checked before the canonical / limit check below? */
2001 if (!pDesc->Legacy.Gen.u1Present)
2002 {
2003 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2004 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2005 }
2006
2007 return VINF_SUCCESS;
2008}
2009
2010
2011/**
2012 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2013 * not.
2014 *
2015 * @param a_pIemCpu The IEM per CPU data.
2016 * @param a_pCtx The CPU context.
2017 */
2018#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2019# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2020 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2021 ? (a_pCtx)->eflags.u \
2022 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2023#else
2024# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2025 ( (a_pCtx)->eflags.u )
2026#endif
2027
2028/**
2029 * Updates the EFLAGS in the correct manner wrt. PATM.
2030 *
2031 * @param a_pIemCpu The IEM per CPU data.
2032 * @param a_pCtx The CPU context.
2033 */
2034#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2035# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2036 do { \
2037 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2038 (a_pCtx)->eflags.u = (a_fEfl); \
2039 else \
2040 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2041 } while (0)
2042#else
2043# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2044 do { \
2045 (a_pCtx)->eflags.u = (a_fEfl); \
2046 } while (0)
2047#endif
2048
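/*
 * Typical read-modify-write pattern using the two macros above, as done by
 * the exception delivery code later in this file (sketch):
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 *
 * Going through the macros rather than touching eflags.u directly keeps
 * raw-mode (PATM) guests working, since parts of EFLAGS may live in CPUM.
 */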
2049
2050/** @} */
2051
2052/** @name Raising Exceptions.
2053 *
2054 * @{
2055 */
2056
2057/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2058 * @{ */
2059/** CPU exception. */
2060#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2061/** External interrupt (from PIC, APIC, whatever). */
2062#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2063/** Software interrupt (int or into, not bound).
2064 * Returns to the following instruction. */
2065#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2066/** Takes an error code. */
2067#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2068/** Takes a CR2. */
2069#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2070/** Generated by the breakpoint instruction. */
2071#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2072/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2073#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2074/** @} */
2075
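/*
 * These flags are OR'ed together when raising an event.  For instance, a
 * fault that takes an error code, such as \#GP, would be raised with
 * something along the lines of (sketch; the actual raisers are defined
 * further down in this file):
 *
 *      return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP,
 *                               IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
 *                               uErr, 0);
 */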
2076
2077/**
2078 * Loads the specified stack far pointer from the TSS.
2079 *
2080 * @returns VBox strict status code.
2081 * @param pIemCpu The IEM per CPU instance data.
2082 * @param pCtx The CPU context.
2083 * @param uCpl The CPL to load the stack for.
2084 * @param pSelSS Where to return the new stack segment.
2085 * @param puEsp Where to return the new stack pointer.
2086 */
2087IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2088 PRTSEL pSelSS, uint32_t *puEsp)
2089{
2090 VBOXSTRICTRC rcStrict;
2091 Assert(uCpl < 4);
2092 *puEsp = 0; /* make gcc happy */
2093 *pSelSS = 0; /* make gcc happy */
2094
2095 switch (pCtx->tr.Attr.n.u4Type)
2096 {
2097 /*
2098 * 16-bit TSS (X86TSS16).
2099 */
2100 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2101 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2102 {
2103 uint32_t off = uCpl * 4 + 2;
2104 if (off + 4 > pCtx->tr.u32Limit)
2105 {
2106 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2107 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2108 }
2109
2110 uint32_t u32Tmp = 0; /* gcc maybe... */
2111 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2112 if (rcStrict == VINF_SUCCESS)
2113 {
2114 *puEsp = RT_LOWORD(u32Tmp);
2115 *pSelSS = RT_HIWORD(u32Tmp);
2116 return VINF_SUCCESS;
2117 }
2118 break;
2119 }
2120
2121 /*
2122 * 32-bit TSS (X86TSS32).
2123 */
2124 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2125 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2126 {
2127 uint32_t off = uCpl * 8 + 4;
2128 if (off + 7 > pCtx->tr.u32Limit)
2129 {
2130                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2131 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2132 }
2133
2134 uint64_t u64Tmp;
2135 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2136 if (rcStrict == VINF_SUCCESS)
2137 {
2138 *puEsp = u64Tmp & UINT32_MAX;
2139 *pSelSS = (RTSEL)(u64Tmp >> 32);
2140 return VINF_SUCCESS;
2141 }
2142 break;
2143 }
2144
2145 default:
2146 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2147 }
2148 return rcStrict;
2149}
2150
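/*
 * For reference, the stack fields read above sit at these offsets in the
 * TSS (Intel SDM vol. 3, Task Management):
 *
 *      16-bit TSS:  SP0=0x02 SS0=0x04   SP1=0x06 SS1=0x08   SP2=0x0a SS2=0x0c
 *      32-bit TSS:  ESP0=0x04 SS0=0x08  ESP1=0x0c SS1=0x10  ESP2=0x14 SS2=0x18
 *
 * which is where the uCpl * 4 + 2 and uCpl * 8 + 4 calculations come from;
 * a single 32-bit/64-bit fetch then picks up both the stack pointer and the
 * stack segment selector.
 */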
2151
2152/**
2153 * Loads the specified stack pointer from the 64-bit TSS.
2154 *
2155 * @returns VBox strict status code.
2156 * @param pIemCpu The IEM per CPU instance data.
2157 * @param pCtx The CPU context.
2158 * @param uCpl The CPL to load the stack for.
2159 * @param   uIst            The interrupt stack table index; 0 means use uCpl.
2160 * @param puRsp Where to return the new stack pointer.
2161 */
2162IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2163{
2164 Assert(uCpl < 4);
2165 Assert(uIst < 8);
2166 *puRsp = 0; /* make gcc happy */
2167
2168 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
2169
2170 uint32_t off;
2171 if (uIst)
2172 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2173 else
2174 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2175 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2176 {
2177 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2178 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2179 }
2180
2181 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2182}
2183
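/*
 * For reference, the 64-bit TSS offsets used above (Intel SDM vol. 3,
 * "64-Bit Task-State Segment"): rsp0/1/2 sit at 0x04/0x0c/0x14 and
 * ist1..ist7 at 0x24..0x54, so uIst == 0 selects the RSP slot matching uCpl
 * while uIst 1..7 selects the corresponding IST slot.
 */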
2184
2185/**
2186 * Adjust the CPU state according to the exception being raised.
2187 *
2188 * @param pCtx The CPU context.
2189 * @param u8Vector The exception that has been raised.
2190 */
2191DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2192{
2193 switch (u8Vector)
2194 {
2195 case X86_XCPT_DB:
2196 pCtx->dr[7] &= ~X86_DR7_GD;
2197 break;
2198 /** @todo Read the AMD and Intel exception reference... */
2199 }
2200}
2201
2202
2203/**
2204 * Implements exceptions and interrupts for real mode.
2205 *
2206 * @returns VBox strict status code.
2207 * @param pIemCpu The IEM per CPU instance data.
2208 * @param pCtx The CPU context.
2209 * @param cbInstr The number of bytes to offset rIP by in the return
2210 * address.
2211 * @param u8Vector The interrupt / exception vector number.
2212 * @param fFlags The flags.
2213 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2214 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2215 */
2216IEM_STATIC VBOXSTRICTRC
2217iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2218 PCPUMCTX pCtx,
2219 uint8_t cbInstr,
2220 uint8_t u8Vector,
2221 uint32_t fFlags,
2222 uint16_t uErr,
2223 uint64_t uCr2)
2224{
2225 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2226 NOREF(uErr); NOREF(uCr2);
2227
2228 /*
2229 * Read the IDT entry.
2230 */
2231 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2232 {
2233 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2234 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2235 }
2236 RTFAR16 Idte;
2237 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2238 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2239 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2240 return rcStrict;
2241
2242 /*
2243 * Push the stack frame.
2244 */
2245 uint16_t *pu16Frame;
2246 uint64_t uNewRsp;
2247 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2248 if (rcStrict != VINF_SUCCESS)
2249 return rcStrict;
2250
2251 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2252 pu16Frame[2] = (uint16_t)fEfl;
2253 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2254 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2255 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2256 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2257 return rcStrict;
2258
2259 /*
2260 * Load the vector address into cs:ip and make exception specific state
2261 * adjustments.
2262 */
2263 pCtx->cs.Sel = Idte.sel;
2264 pCtx->cs.ValidSel = Idte.sel;
2265 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2266 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2267 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2268 pCtx->rip = Idte.off;
2269 fEfl &= ~X86_EFL_IF;
2270 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2271
2272 /** @todo do we actually do this in real mode? */
2273 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2274 iemRaiseXcptAdjustState(pCtx, u8Vector);
2275
2276 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2277}
2278
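/*
 * To recap the layout handled above: each real-mode IVT entry is 4 bytes at
 * linear address IDTR.base + vector * 4, holding the handler offset in the
 * low word and the handler segment in the high word (e.g. INT 21h reads its
 * far pointer from IDTR.base + 0x84).  The pushed frame is 3 words: FLAGS at
 * SP+4, CS at SP+2 and the return IP at SP.
 */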
2279
2280/**
2281 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2282 *
2283 * @param pIemCpu The IEM per CPU instance data.
2284 * @param pSReg Pointer to the segment register.
2285 */
2286IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2287{
2288 pSReg->Sel = 0;
2289 pSReg->ValidSel = 0;
2290 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2291 {
2292 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2293 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2294 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2295 }
2296 else
2297 {
2298 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2299 /** @todo check this on AMD-V */
2300 pSReg->u64Base = 0;
2301 pSReg->u32Limit = 0;
2302 }
2303}
2304
2305
2306/**
2307 * Loads a segment selector during a task switch in V8086 mode.
2308 *
2309 * @param pIemCpu The IEM per CPU instance data.
2310 * @param pSReg Pointer to the segment register.
2311 * @param uSel The selector value to load.
2312 */
2313IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2314{
2315 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2316 pSReg->Sel = uSel;
2317 pSReg->ValidSel = uSel;
2318 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2319 pSReg->u64Base = uSel << 4;
2320 pSReg->u32Limit = 0xffff;
2321 pSReg->Attr.u = 0xf3;
2322}
2323
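/*
 * Worked example of the 8086 segmentation rule applied above: loading the
 * selector 0x1234 in V8086 mode yields
 *      u64Base  = 0x1234 << 4 = 0x12340
 *      u32Limit = 0xffff
 *      Attr.u   = 0xf3 (present, DPL=3, accessed read/write data segment)
 */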
2324
2325/**
2326 * Loads a NULL data selector into a selector register, both the hidden and
2327 * visible parts, in protected mode.
2328 *
2329 * @param pIemCpu The IEM state of the calling EMT.
2330 * @param pSReg Pointer to the segment register.
2331 * @param uRpl The RPL.
2332 */
2333IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2334{
2335    /** @todo Testcase: write a testcase checking what happens when loading a NULL
2336 * data selector in protected mode. */
2337 pSReg->Sel = uRpl;
2338 pSReg->ValidSel = uRpl;
2339 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2340 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2341 {
2342 /* VT-x (Intel 3960x) observed doing something like this. */
2343 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2344 pSReg->u32Limit = UINT32_MAX;
2345 pSReg->u64Base = 0;
2346 }
2347 else
2348 {
2349 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2350 pSReg->u32Limit = 0;
2351 pSReg->u64Base = 0;
2352 }
2353}
2354
2355
2356/**
2357 * Loads a segment selector during a task switch in protected mode. In this task
2358 * switch scenario, we raise #TS exceptions rather than #GPs.
2359 *
2360 * @returns VBox strict status code.
2361 * @param pIemCpu The IEM per CPU instance data.
2362 * @param pSReg Pointer to the segment register.
2363 * @param uSel The new selector value.
2364 *
2365 * @remarks This does -NOT- handle CS or SS.
2366 * @remarks This expects pIemCpu->uCpl to be up to date.
2367 */
2368IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2369{
2370 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2371
2372 /* Null data selector. */
2373 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2374 {
2375 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2376 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2377 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2378 return VINF_SUCCESS;
2379 }
2380
2381 /* Fetch the descriptor. */
2382 IEMSELDESC Desc;
2383 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2384 if (rcStrict != VINF_SUCCESS)
2385 {
2386 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2387 VBOXSTRICTRC_VAL(rcStrict)));
2388 return rcStrict;
2389 }
2390
2391 /* Must be a data segment or readable code segment. */
2392 if ( !Desc.Legacy.Gen.u1DescType
2393 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2394 {
2395 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2396 Desc.Legacy.Gen.u4Type));
2397 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2398 }
2399
2400 /* Check privileges for data segments and non-conforming code segments. */
2401 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2402 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2403 {
2404 /* The RPL and the new CPL must be less than or equal to the DPL. */
2405 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2406 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2407 {
2408 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2409 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2410 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2411 }
2412 }
2413
2414 /* Is it there? */
2415 if (!Desc.Legacy.Gen.u1Present)
2416 {
2417 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2418 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2419 }
2420
2421 /* The base and limit. */
2422 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2423 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2424
2425 /*
2426 * Ok, everything checked out fine. Now set the accessed bit before
2427 * committing the result into the registers.
2428 */
2429 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2430 {
2431 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2432 if (rcStrict != VINF_SUCCESS)
2433 return rcStrict;
2434 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2435 }
2436
2437 /* Commit */
2438 pSReg->Sel = uSel;
2439 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2440 pSReg->u32Limit = cbLimit;
2441 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2442 pSReg->ValidSel = uSel;
2443 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2444 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2445 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2446
2447 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2448 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2449 return VINF_SUCCESS;
2450}
2451
2452
2453/**
2454 * Performs a task switch.
2455 *
2456 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2457 * caller is responsible for performing the necessary checks (like DPL, TSS
2458 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2459 * reference for JMP, CALL, IRET.
2460 *
2461 * If the task switch is due to a software interrupt or hardware exception,
2462 * the caller is responsible for validating the TSS selector and descriptor. See
2463 * Intel Instruction reference for INT n.
2464 *
2465 * @returns VBox strict status code.
2466 * @param pIemCpu The IEM per CPU instance data.
2467 * @param pCtx The CPU context.
2468 * @param enmTaskSwitch What caused this task switch.
2469 * @param uNextEip The EIP effective after the task switch.
2470 * @param fFlags The flags.
2471 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2472 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2473 * @param SelTSS The TSS selector of the new task.
2474 * @param pNewDescTSS Pointer to the new TSS descriptor.
2475 */
2476IEM_STATIC VBOXSTRICTRC
2477iemTaskSwitch(PIEMCPU pIemCpu,
2478 PCPUMCTX pCtx,
2479 IEMTASKSWITCH enmTaskSwitch,
2480 uint32_t uNextEip,
2481 uint32_t fFlags,
2482 uint16_t uErr,
2483 uint64_t uCr2,
2484 RTSEL SelTSS,
2485 PIEMSELDESC pNewDescTSS)
2486{
2487 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2488 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2489
2490 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2491 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2492 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2493 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2494 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2495
2496 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2497 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2498
2499 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2500 fIsNewTSS386, pCtx->eip, uNextEip));
2501
2502 /* Update CR2 in case it's a page-fault. */
2503 /** @todo This should probably be done much earlier in IEM/PGM. See
2504 * @bugref{5653} comment #49. */
2505 if (fFlags & IEM_XCPT_FLAGS_CR2)
2506 pCtx->cr2 = uCr2;
2507
2508 /*
2509 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2510 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2511 */
2512 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2513 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2514 if (uNewTSSLimit < uNewTSSLimitMin)
2515 {
2516 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2517 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2518 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2519 }
2520
2521 /*
2522 * Check the current TSS limit. The last written byte to the current TSS during the
2523 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2524 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2525 *
2526 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2527 * end up with smaller than "legal" TSS limits.
2528 */
2529 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2530 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2531 if (uCurTSSLimit < uCurTSSLimitMin)
2532 {
2533 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2534 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2535 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2536 }
2537
2538 /*
2539 * Verify that the new TSS can be accessed and map it. Map only the required contents
2540 * and not the entire TSS.
2541 */
2542 void *pvNewTSS;
2543 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2544 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2545 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2546 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2547 * not perform correct translation if this happens. See Intel spec. 7.2.1
2548 * "Task-State Segment" */
2549 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2550 if (rcStrict != VINF_SUCCESS)
2551 {
2552 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2553 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2554 return rcStrict;
2555 }
2556
2557 /*
2558 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2559 */
2560 uint32_t u32EFlags = pCtx->eflags.u32;
2561 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2562 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2563 {
2564 PX86DESC pDescCurTSS;
2565 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2566 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2567 if (rcStrict != VINF_SUCCESS)
2568 {
2569            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2570 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2571 return rcStrict;
2572 }
2573
2574 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2575 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2576 if (rcStrict != VINF_SUCCESS)
2577 {
2578            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2579 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2580 return rcStrict;
2581 }
2582
2583 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2584 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2585 {
2586 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2587 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2588 u32EFlags &= ~X86_EFL_NT;
2589 }
2590 }
2591
2592 /*
2593 * Save the CPU state into the current TSS.
2594 */
2595 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2596 if (GCPtrNewTSS == GCPtrCurTSS)
2597 {
2598 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2599 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2600 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2601 }
2602 if (fIsNewTSS386)
2603 {
2604 /*
2605 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2606 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2607 */
2608 void *pvCurTSS32;
2609 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2610 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2611 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2612 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2613 if (rcStrict != VINF_SUCCESS)
2614 {
2615 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2616 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2617 return rcStrict;
2618 }
2619
2620        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
2621 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2622 pCurTSS32->eip = uNextEip;
2623 pCurTSS32->eflags = u32EFlags;
2624 pCurTSS32->eax = pCtx->eax;
2625 pCurTSS32->ecx = pCtx->ecx;
2626 pCurTSS32->edx = pCtx->edx;
2627 pCurTSS32->ebx = pCtx->ebx;
2628 pCurTSS32->esp = pCtx->esp;
2629 pCurTSS32->ebp = pCtx->ebp;
2630 pCurTSS32->esi = pCtx->esi;
2631 pCurTSS32->edi = pCtx->edi;
2632 pCurTSS32->es = pCtx->es.Sel;
2633 pCurTSS32->cs = pCtx->cs.Sel;
2634 pCurTSS32->ss = pCtx->ss.Sel;
2635 pCurTSS32->ds = pCtx->ds.Sel;
2636 pCurTSS32->fs = pCtx->fs.Sel;
2637 pCurTSS32->gs = pCtx->gs.Sel;
2638
2639 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2640 if (rcStrict != VINF_SUCCESS)
2641 {
2642 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2643 VBOXSTRICTRC_VAL(rcStrict)));
2644 return rcStrict;
2645 }
2646 }
2647 else
2648 {
2649 /*
2650 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2651 */
2652 void *pvCurTSS16;
2653 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2654 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2655 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2656 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2657 if (rcStrict != VINF_SUCCESS)
2658 {
2659 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2660 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2661 return rcStrict;
2662 }
2663
2664        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
2665 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2666 pCurTSS16->ip = uNextEip;
2667 pCurTSS16->flags = u32EFlags;
2668 pCurTSS16->ax = pCtx->ax;
2669 pCurTSS16->cx = pCtx->cx;
2670 pCurTSS16->dx = pCtx->dx;
2671 pCurTSS16->bx = pCtx->bx;
2672 pCurTSS16->sp = pCtx->sp;
2673 pCurTSS16->bp = pCtx->bp;
2674 pCurTSS16->si = pCtx->si;
2675 pCurTSS16->di = pCtx->di;
2676 pCurTSS16->es = pCtx->es.Sel;
2677 pCurTSS16->cs = pCtx->cs.Sel;
2678 pCurTSS16->ss = pCtx->ss.Sel;
2679 pCurTSS16->ds = pCtx->ds.Sel;
2680
2681 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2682 if (rcStrict != VINF_SUCCESS)
2683 {
2684 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2685 VBOXSTRICTRC_VAL(rcStrict)));
2686 return rcStrict;
2687 }
2688 }
2689
2690 /*
2691 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2692 */
2693 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2694 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2695 {
2696 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2697 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2698 pNewTSS->selPrev = pCtx->tr.Sel;
2699 }
2700
2701 /*
2702 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2703 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2704 */
2705 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2706 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2707 bool fNewDebugTrap;
2708 if (fIsNewTSS386)
2709 {
2710 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2711 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2712 uNewEip = pNewTSS32->eip;
2713 uNewEflags = pNewTSS32->eflags;
2714 uNewEax = pNewTSS32->eax;
2715 uNewEcx = pNewTSS32->ecx;
2716 uNewEdx = pNewTSS32->edx;
2717 uNewEbx = pNewTSS32->ebx;
2718 uNewEsp = pNewTSS32->esp;
2719 uNewEbp = pNewTSS32->ebp;
2720 uNewEsi = pNewTSS32->esi;
2721 uNewEdi = pNewTSS32->edi;
2722 uNewES = pNewTSS32->es;
2723 uNewCS = pNewTSS32->cs;
2724 uNewSS = pNewTSS32->ss;
2725 uNewDS = pNewTSS32->ds;
2726 uNewFS = pNewTSS32->fs;
2727 uNewGS = pNewTSS32->gs;
2728 uNewLdt = pNewTSS32->selLdt;
2729 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2730 }
2731 else
2732 {
2733 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2734 uNewCr3 = 0;
2735 uNewEip = pNewTSS16->ip;
2736 uNewEflags = pNewTSS16->flags;
2737 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2738 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2739 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2740 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2741 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2742 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2743 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2744 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2745 uNewES = pNewTSS16->es;
2746 uNewCS = pNewTSS16->cs;
2747 uNewSS = pNewTSS16->ss;
2748 uNewDS = pNewTSS16->ds;
2749 uNewFS = 0;
2750 uNewGS = 0;
2751 uNewLdt = pNewTSS16->selLdt;
2752 fNewDebugTrap = false;
2753 }
2754
2755 if (GCPtrNewTSS == GCPtrCurTSS)
2756 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2757 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2758
2759 /*
2760 * We're done accessing the new TSS.
2761 */
2762 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2763 if (rcStrict != VINF_SUCCESS)
2764 {
2765 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2766 return rcStrict;
2767 }
2768
2769 /*
2770 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2771 */
2772 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2773 {
2774 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2775 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2776 if (rcStrict != VINF_SUCCESS)
2777 {
2778 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2779 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2780 return rcStrict;
2781 }
2782
2783 /* Check that the descriptor indicates the new TSS is available (not busy). */
2784 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2785 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2786 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2787
2788 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2789 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2790 if (rcStrict != VINF_SUCCESS)
2791 {
2792 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2793 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2794 return rcStrict;
2795 }
2796 }
2797
2798 /*
2799 * From this point on, we're technically in the new task. We will defer exceptions
2800 * until the completion of the task switch but before executing any instructions in the new task.
2801 */
2802 pCtx->tr.Sel = SelTSS;
2803 pCtx->tr.ValidSel = SelTSS;
2804 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2805 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2806 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2807 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2808 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2809
2810 /* Set the busy bit in TR. */
2811 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2812 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2813 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2814 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2815 {
2816 uNewEflags |= X86_EFL_NT;
2817 }
2818
2819 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2820 pCtx->cr0 |= X86_CR0_TS;
2821 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2822
2823 pCtx->eip = uNewEip;
2824 pCtx->eax = uNewEax;
2825 pCtx->ecx = uNewEcx;
2826 pCtx->edx = uNewEdx;
2827 pCtx->ebx = uNewEbx;
2828 pCtx->esp = uNewEsp;
2829 pCtx->ebp = uNewEbp;
2830 pCtx->esi = uNewEsi;
2831 pCtx->edi = uNewEdi;
2832
2833 uNewEflags &= X86_EFL_LIVE_MASK;
2834 uNewEflags |= X86_EFL_RA1_MASK;
2835 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2836
2837 /*
2838 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2839 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2840 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2841 */
2842 pCtx->es.Sel = uNewES;
2843 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2844 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2845
2846 pCtx->cs.Sel = uNewCS;
2847 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2848 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2849
2850 pCtx->ss.Sel = uNewSS;
2851 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2852 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2853
2854 pCtx->ds.Sel = uNewDS;
2855 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2856 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2857
2858 pCtx->fs.Sel = uNewFS;
2859 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2860 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2861
2862 pCtx->gs.Sel = uNewGS;
2863 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2864 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2865 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2866
2867 pCtx->ldtr.Sel = uNewLdt;
2868 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2869 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2870 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2871
2872 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2873 {
2874 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2875 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2876 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2877 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2878 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2879 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2880 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2881 }
2882
2883 /*
2884 * Switch CR3 for the new task.
2885 */
2886 if ( fIsNewTSS386
2887 && (pCtx->cr0 & X86_CR0_PG))
2888 {
2889 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2890 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2891 {
2892 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2893 AssertRCSuccessReturn(rc, rc);
2894 }
2895 else
2896 pCtx->cr3 = uNewCr3;
2897
2898 /* Inform PGM. */
2899 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2900 {
2901 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2902 AssertRCReturn(rc, rc);
2903 /* ignore informational status codes */
2904 }
2905 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2906 }
2907
2908 /*
2909 * Switch LDTR for the new task.
2910 */
2911 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2912 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2913 else
2914 {
2915 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2916
2917 IEMSELDESC DescNewLdt;
2918 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2919 if (rcStrict != VINF_SUCCESS)
2920 {
2921 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2922 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2923 return rcStrict;
2924 }
2925 if ( !DescNewLdt.Legacy.Gen.u1Present
2926 || DescNewLdt.Legacy.Gen.u1DescType
2927 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2928 {
2929 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2930 uNewLdt, DescNewLdt.Legacy.u));
2931 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2932 }
2933
2934 pCtx->ldtr.ValidSel = uNewLdt;
2935 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2936 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2937 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2938 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2939 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2940 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2941 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2942 }
2943
2944 IEMSELDESC DescSS;
2945 if (IEM_IS_V86_MODE(pIemCpu))
2946 {
2947 pIemCpu->uCpl = 3;
2948 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2949 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2950 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2951 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2952 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2953 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2954 }
2955 else
2956 {
2957 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2958
2959 /*
2960 * Load the stack segment for the new task.
2961 */
2962 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2963 {
2964 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2965 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2966 }
2967
2968 /* Fetch the descriptor. */
2969 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2970 if (rcStrict != VINF_SUCCESS)
2971 {
2972 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2973 VBOXSTRICTRC_VAL(rcStrict)));
2974 return rcStrict;
2975 }
2976
2977 /* SS must be a data segment and writable. */
2978 if ( !DescSS.Legacy.Gen.u1DescType
2979 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2980 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2981 {
2982 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2983 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2984 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2985 }
2986
2987 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2988 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2989 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2990 {
2991 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2992 uNewCpl));
2993 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2994 }
2995
2996 /* Is it there? */
2997 if (!DescSS.Legacy.Gen.u1Present)
2998 {
2999 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3000 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3001 }
3002
3003 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3004 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3005
3006 /* Set the accessed bit before committing the result into SS. */
3007 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3008 {
3009 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3010 if (rcStrict != VINF_SUCCESS)
3011 return rcStrict;
3012 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3013 }
3014
3015 /* Commit SS. */
3016 pCtx->ss.Sel = uNewSS;
3017 pCtx->ss.ValidSel = uNewSS;
3018 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3019 pCtx->ss.u32Limit = cbLimit;
3020 pCtx->ss.u64Base = u64Base;
3021 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3023
3024 /* CPL has changed, update IEM before loading rest of segments. */
3025 pIemCpu->uCpl = uNewCpl;
3026
3027 /*
3028 * Load the data segments for the new task.
3029 */
3030 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3031 if (rcStrict != VINF_SUCCESS)
3032 return rcStrict;
3033 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3034 if (rcStrict != VINF_SUCCESS)
3035 return rcStrict;
3036 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3037 if (rcStrict != VINF_SUCCESS)
3038 return rcStrict;
3039 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3040 if (rcStrict != VINF_SUCCESS)
3041 return rcStrict;
3042
3043 /*
3044 * Load the code segment for the new task.
3045 */
3046 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3047 {
3048 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3049 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3050 }
3051
3052 /* Fetch the descriptor. */
3053 IEMSELDESC DescCS;
3054 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3055 if (rcStrict != VINF_SUCCESS)
3056 {
3057 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3058 return rcStrict;
3059 }
3060
3061 /* CS must be a code segment. */
3062 if ( !DescCS.Legacy.Gen.u1DescType
3063 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3064 {
3065 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3066 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3067 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3068 }
3069
3070 /* For conforming CS, DPL must be less than or equal to the RPL. */
3071 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3072 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3073 {
3074            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3075 DescCS.Legacy.Gen.u2Dpl));
3076 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3077 }
3078
3079 /* For non-conforming CS, DPL must match RPL. */
3080 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3081 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3082 {
3083            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3084 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3085 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3086 }
3087
3088 /* Is it there? */
3089 if (!DescCS.Legacy.Gen.u1Present)
3090 {
3091 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3092 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3093 }
3094
3095 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3096 u64Base = X86DESC_BASE(&DescCS.Legacy);
3097
3098 /* Set the accessed bit before committing the result into CS. */
3099 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3100 {
3101 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3102 if (rcStrict != VINF_SUCCESS)
3103 return rcStrict;
3104 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3105 }
3106
3107 /* Commit CS. */
3108 pCtx->cs.Sel = uNewCS;
3109 pCtx->cs.ValidSel = uNewCS;
3110 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3111 pCtx->cs.u32Limit = cbLimit;
3112 pCtx->cs.u64Base = u64Base;
3113 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3114 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3115 }
3116
3117 /** @todo Debug trap. */
3118 if (fIsNewTSS386 && fNewDebugTrap)
3119 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3120
3121 /*
3122 * Construct the error code masks based on what caused this task switch.
3123 * See Intel Instruction reference for INT.
3124 */
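    /* Note: the EXT bit (bit 0) of any error code pushed while dispatching this event is
             set whenever the event did not originate from a software interrupt instruction. */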
3125 uint16_t uExt;
3126 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3127 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3128 {
3129 uExt = 1;
3130 }
3131 else
3132 uExt = 0;
3133
3134 /*
3135 * Push any error code on to the new stack.
3136 */
3137 if (fFlags & IEM_XCPT_FLAGS_ERR)
3138 {
3139 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3140 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3141 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3142
3143 /* Check that there is sufficient space on the stack. */
3144 /** @todo Factor out segment limit checking for normal/expand down segments
3145 * into a separate function. */
3146 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3147 {
3148 if ( pCtx->esp - 1 > cbLimitSS
3149 || pCtx->esp < cbStackFrame)
3150 {
3151 /** @todo Intel says #SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3152 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3153 cbStackFrame));
3154 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3155 }
3156 }
3157 else
3158 {
3159 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3160 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3161 {
3162 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3163 cbStackFrame));
3164 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3165 }
3166 }
3167
3168
3169 if (fIsNewTSS386)
3170 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3171 else
3172 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3173 if (rcStrict != VINF_SUCCESS)
3174 {
3175 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3176 VBOXSTRICTRC_VAL(rcStrict)));
3177 return rcStrict;
3178 }
3179 }
3180
3181 /* Check the new EIP against the new CS limit. */
3182 if (pCtx->eip > pCtx->cs.u32Limit)
3183 {
3184        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3185 pCtx->eip, pCtx->cs.u32Limit));
3186 /** @todo Intel says #GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3187 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3188 }
3189
3190 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3191 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3192}
3193
3194
3195/**
3196 * Implements exceptions and interrupts for protected mode.
3197 *
3198 * @returns VBox strict status code.
3199 * @param pIemCpu The IEM per CPU instance data.
3200 * @param pCtx The CPU context.
3201 * @param cbInstr The number of bytes to offset rIP by in the return
3202 * address.
3203 * @param u8Vector The interrupt / exception vector number.
3204 * @param fFlags The flags.
3205 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3206 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3207 */
3208IEM_STATIC VBOXSTRICTRC
3209iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3210 PCPUMCTX pCtx,
3211 uint8_t cbInstr,
3212 uint8_t u8Vector,
3213 uint32_t fFlags,
3214 uint16_t uErr,
3215 uint64_t uCr2)
3216{
3217 /*
3218 * Read the IDT entry.
3219 */
3220 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3221 {
3222 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3223 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3224 }
3225 X86DESC Idte;
3226 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3227 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3228 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3229 return rcStrict;
3230 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3231 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3232 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3233
3234 /*
3235 * Check the descriptor type, DPL and such.
3236 * ASSUMES this is done in the same order as described for call-gate calls.
3237 */
3238 if (Idte.Gate.u1DescType)
3239 {
3240 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3241 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3242 }
3243 bool fTaskGate = false;
3244 uint8_t f32BitGate = true;
3245 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3246 switch (Idte.Gate.u4Type)
3247 {
3248 case X86_SEL_TYPE_SYS_UNDEFINED:
3249 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3250 case X86_SEL_TYPE_SYS_LDT:
3251 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3252 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3253 case X86_SEL_TYPE_SYS_UNDEFINED2:
3254 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3255 case X86_SEL_TYPE_SYS_UNDEFINED3:
3256 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3257 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3258 case X86_SEL_TYPE_SYS_UNDEFINED4:
3259 {
3260 /** @todo check what actually happens when the type is wrong...
3261 * esp. call gates. */
3262 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3263 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3264 }
3265
3266 case X86_SEL_TYPE_SYS_286_INT_GATE:
3267 f32BitGate = false;
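            /* fall thru */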
3268 case X86_SEL_TYPE_SYS_386_INT_GATE:
3269 fEflToClear |= X86_EFL_IF;
3270 break;
3271
3272 case X86_SEL_TYPE_SYS_TASK_GATE:
3273 fTaskGate = true;
3274#ifndef IEM_IMPLEMENTS_TASKSWITCH
3275 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3276#endif
3277 break;
3278
3279 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3280 f32BitGate = false;
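            /* fall thru */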
3281 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3282 break;
3283
3284 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3285 }
3286
3287 /* Check DPL against CPL if applicable. */
3288 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3289 {
3290 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3291 {
3292 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3293 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3294 }
3295 }
3296
3297 /* Is it there? */
3298 if (!Idte.Gate.u1Present)
3299 {
3300 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3301 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3302 }
3303
3304 /* Is it a task-gate? */
3305 if (fTaskGate)
3306 {
3307 /*
3308 * Construct the error code masks based on what caused this task switch.
3309 * See Intel Instruction reference for INT.
3310 */
3311 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3312 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3313 RTSEL SelTSS = Idte.Gate.u16Sel;
3314
3315 /*
3316 * Fetch the TSS descriptor in the GDT.
3317 */
3318 IEMSELDESC DescTSS;
3319 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3320 if (rcStrict != VINF_SUCCESS)
3321 {
3322 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3323 VBOXSTRICTRC_VAL(rcStrict)));
3324 return rcStrict;
3325 }
3326
3327 /* The TSS descriptor must be a system segment and be available (not busy). */
3328 if ( DescTSS.Legacy.Gen.u1DescType
3329 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3330 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3331 {
3332 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3333 u8Vector, SelTSS, DescTSS.Legacy.au64));
3334 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3335 }
3336
3337 /* The TSS must be present. */
3338 if (!DescTSS.Legacy.Gen.u1Present)
3339 {
3340 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3341 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3342 }
3343
3344 /* Do the actual task switch. */
3345 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3346 }
3347
3348 /* A null CS is bad. */
3349 RTSEL NewCS = Idte.Gate.u16Sel;
3350 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3351 {
3352 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3353 return iemRaiseGeneralProtectionFault0(pIemCpu);
3354 }
3355
3356 /* Fetch the descriptor for the new CS. */
3357 IEMSELDESC DescCS;
3358 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3359 if (rcStrict != VINF_SUCCESS)
3360 {
3361 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3362 return rcStrict;
3363 }
3364
3365 /* Must be a code segment. */
3366 if (!DescCS.Legacy.Gen.u1DescType)
3367 {
3368 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3369 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3370 }
3371 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3372 {
3373 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3374 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3375 }
3376
3377 /* Don't allow lowering the privilege level. */
3378 /** @todo Does the lowering of privileges apply to software interrupts
3379 * only? This has bearings on the more-privileged or
3380 * same-privilege stack behavior further down. A testcase would
3381 * be nice. */
3382 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3383 {
3384 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3385 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3386 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3387 }
3388
3389 /* Make sure the selector is present. */
3390 if (!DescCS.Legacy.Gen.u1Present)
3391 {
3392 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3393 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3394 }
3395
3396 /* Check the new EIP against the new CS limit. */
3397 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3398 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3399 ? Idte.Gate.u16OffsetLow
3400 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3401 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3402 if (uNewEip > cbLimitCS)
3403 {
3404 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3405 u8Vector, uNewEip, cbLimitCS, NewCS));
3406 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3407 }
3408
3409 /* Calc the flag image to push. */
3410 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3411 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3412 fEfl &= ~X86_EFL_RF;
3413 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3414 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3415
3416 /* From V8086 mode only go to CPL 0. */
3417 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3418 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3419 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3420 {
3421 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3422 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3423 }
3424
3425 /*
3426 * If the privilege level changes, we need to get a new stack from the TSS.
3427 * This in turns means validating the new SS and ESP...
3428 */
3429 if (uNewCpl != pIemCpu->uCpl)
3430 {
3431 RTSEL NewSS;
3432 uint32_t uNewEsp;
3433 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3434 if (rcStrict != VINF_SUCCESS)
3435 return rcStrict;
3436
3437 IEMSELDESC DescSS;
3438 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3439 if (rcStrict != VINF_SUCCESS)
3440 return rcStrict;
3441
3442 /* Check that there is sufficient space for the stack frame. */
3443 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3444 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3445 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3446 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
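        /* That is 5 entries (EIP, CS, EFLAGS, ESP, SS) plus an optional error code, and a
           V8086 interruption additionally pushes ES, DS, FS and GS; each entry is 2 or 4
           bytes depending on the gate size. */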
3447
3448 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3449 {
3450 if ( uNewEsp - 1 > cbLimitSS
3451 || uNewEsp < cbStackFrame)
3452 {
3453 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3454 u8Vector, NewSS, uNewEsp, cbStackFrame));
3455 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3456 }
3457 }
3458 else
3459 {
3460 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3461 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3462 {
3463 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3464 u8Vector, NewSS, uNewEsp, cbStackFrame));
3465 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3466 }
3467 }
3468
3469 /*
3470 * Start making changes.
3471 */
3472
3473 /* Create the stack frame. */
3474 RTPTRUNION uStackFrame;
3475 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3476 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3477 if (rcStrict != VINF_SUCCESS)
3478 return rcStrict;
3479 void * const pvStackFrame = uStackFrame.pv;
3480 if (f32BitGate)
3481 {
3482 if (fFlags & IEM_XCPT_FLAGS_ERR)
3483 *uStackFrame.pu32++ = uErr;
3484 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3485 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3486 uStackFrame.pu32[2] = fEfl;
3487 uStackFrame.pu32[3] = pCtx->esp;
3488 uStackFrame.pu32[4] = pCtx->ss.Sel;
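            /* When interrupting V8086 code the raw CS value is pushed (no RPL fix-up) and
               the data segment registers follow the regular frame. */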
3489 if (fEfl & X86_EFL_VM)
3490 {
3491 uStackFrame.pu32[1] = pCtx->cs.Sel;
3492 uStackFrame.pu32[5] = pCtx->es.Sel;
3493 uStackFrame.pu32[6] = pCtx->ds.Sel;
3494 uStackFrame.pu32[7] = pCtx->fs.Sel;
3495 uStackFrame.pu32[8] = pCtx->gs.Sel;
3496 }
3497 }
3498 else
3499 {
3500 if (fFlags & IEM_XCPT_FLAGS_ERR)
3501 *uStackFrame.pu16++ = uErr;
3502 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3503 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3504 uStackFrame.pu16[2] = fEfl;
3505 uStackFrame.pu16[3] = pCtx->sp;
3506 uStackFrame.pu16[4] = pCtx->ss.Sel;
3507 if (fEfl & X86_EFL_VM)
3508 {
3509 uStackFrame.pu16[1] = pCtx->cs.Sel;
3510 uStackFrame.pu16[5] = pCtx->es.Sel;
3511 uStackFrame.pu16[6] = pCtx->ds.Sel;
3512 uStackFrame.pu16[7] = pCtx->fs.Sel;
3513 uStackFrame.pu16[8] = pCtx->gs.Sel;
3514 }
3515 }
3516 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3517 if (rcStrict != VINF_SUCCESS)
3518 return rcStrict;
3519
3520 /* Mark the selectors 'accessed' (hope this is the correct time). */
3521        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3522 * after pushing the stack frame? (Write protect the gdt + stack to
3523 * find out.) */
3524 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3525 {
3526 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3527 if (rcStrict != VINF_SUCCESS)
3528 return rcStrict;
3529 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3530 }
3531
3532 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3533 {
3534 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3535 if (rcStrict != VINF_SUCCESS)
3536 return rcStrict;
3537 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3538 }
3539
3540 /*
3541         * Start committing the register changes (joins with the DPL=CPL branch).
3542 */
3543 pCtx->ss.Sel = NewSS;
3544 pCtx->ss.ValidSel = NewSS;
3545 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3546 pCtx->ss.u32Limit = cbLimitSS;
3547 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3548 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3549 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3550 pIemCpu->uCpl = uNewCpl;
3551
3552 if (fEfl & X86_EFL_VM)
3553 {
3554 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3555 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3556 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3557 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3558 }
3559 }
3560 /*
3561 * Same privilege, no stack change and smaller stack frame.
3562 */
3563 else
3564 {
3565 uint64_t uNewRsp;
3566 RTPTRUNION uStackFrame;
3567 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
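        /* Just EIP, CS and EFLAGS plus an optional error code; SS:ESP isn't pushed since
           the stack doesn't change. */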
3568 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3569 if (rcStrict != VINF_SUCCESS)
3570 return rcStrict;
3571 void * const pvStackFrame = uStackFrame.pv;
3572
3573 if (f32BitGate)
3574 {
3575 if (fFlags & IEM_XCPT_FLAGS_ERR)
3576 *uStackFrame.pu32++ = uErr;
3577 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3578 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3579 uStackFrame.pu32[2] = fEfl;
3580 }
3581 else
3582 {
3583 if (fFlags & IEM_XCPT_FLAGS_ERR)
3584 *uStackFrame.pu16++ = uErr;
3585 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3586 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3587 uStackFrame.pu16[2] = fEfl;
3588 }
3589 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3590 if (rcStrict != VINF_SUCCESS)
3591 return rcStrict;
3592
3593 /* Mark the CS selector as 'accessed'. */
3594 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3595 {
3596 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3597 if (rcStrict != VINF_SUCCESS)
3598 return rcStrict;
3599 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3600 }
3601
3602 /*
3603 * Start committing the register changes (joins with the other branch).
3604 */
3605 pCtx->rsp = uNewRsp;
3606 }
3607
3608 /* ... register committing continues. */
3609 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3610 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3611 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3612 pCtx->cs.u32Limit = cbLimitCS;
3613 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3614 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3615
3616 pCtx->rip = uNewEip;
3617 fEfl &= ~fEflToClear;
3618 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3619
3620 if (fFlags & IEM_XCPT_FLAGS_CR2)
3621 pCtx->cr2 = uCr2;
3622
3623 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3624 iemRaiseXcptAdjustState(pCtx, u8Vector);
3625
3626 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3627}
3628
3629
3630/**
3631 * Implements exceptions and interrupts for long mode.
3632 *
3633 * @returns VBox strict status code.
3634 * @param pIemCpu The IEM per CPU instance data.
3635 * @param pCtx The CPU context.
3636 * @param cbInstr The number of bytes to offset rIP by in the return
3637 * address.
3638 * @param u8Vector The interrupt / exception vector number.
3639 * @param fFlags The flags.
3640 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3641 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3642 */
3643IEM_STATIC VBOXSTRICTRC
3644iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3645 PCPUMCTX pCtx,
3646 uint8_t cbInstr,
3647 uint8_t u8Vector,
3648 uint32_t fFlags,
3649 uint16_t uErr,
3650 uint64_t uCr2)
3651{
3652 /*
3653 * Read the IDT entry.
3654 */
3655 uint16_t offIdt = (uint16_t)u8Vector << 4;
3656 if (pCtx->idtr.cbIdt < offIdt + 7)
3657 {
3658 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3659 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3660 }
3661 X86DESC64 Idte;
3662 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3663 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3664 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3665 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3666 return rcStrict;
3667 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3668 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3669 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3670
3671 /*
3672 * Check the descriptor type, DPL and such.
3673 * ASSUMES this is done in the same order as described for call-gate calls.
3674 */
3675 if (Idte.Gate.u1DescType)
3676 {
3677 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3678 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3679 }
3680 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3681 switch (Idte.Gate.u4Type)
3682 {
3683 case AMD64_SEL_TYPE_SYS_INT_GATE:
3684 fEflToClear |= X86_EFL_IF;
3685 break;
3686 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3687 break;
3688
3689 default:
3690 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3691 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3692 }
3693
3694 /* Check DPL against CPL if applicable. */
3695 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3696 {
3697 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3698 {
3699 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3700 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3701 }
3702 }
3703
3704 /* Is it there? */
3705 if (!Idte.Gate.u1Present)
3706 {
3707 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3708 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3709 }
3710
3711 /* A null CS is bad. */
3712 RTSEL NewCS = Idte.Gate.u16Sel;
3713 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3714 {
3715 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3716 return iemRaiseGeneralProtectionFault0(pIemCpu);
3717 }
3718
3719 /* Fetch the descriptor for the new CS. */
3720 IEMSELDESC DescCS;
3721 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3722 if (rcStrict != VINF_SUCCESS)
3723 {
3724 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3725 return rcStrict;
3726 }
3727
3728 /* Must be a 64-bit code segment. */
3729 if (!DescCS.Long.Gen.u1DescType)
3730 {
3731 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3732 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3733 }
3734 if ( !DescCS.Long.Gen.u1Long
3735 || DescCS.Long.Gen.u1DefBig
3736 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3737 {
3738 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3739 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3740 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3741 }
3742
3743 /* Don't allow lowering the privilege level. For non-conforming CS
3744 selectors, the CS.DPL sets the privilege level the trap/interrupt
3745 handler runs at. For conforming CS selectors, the CPL remains
3746 unchanged, but the CS.DPL must be <= CPL. */
3747 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3748 * when CPU in Ring-0. Result \#GP? */
3749 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3750 {
3751 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3752 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3753 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3754 }
3755
3756
3757 /* Make sure the selector is present. */
3758 if (!DescCS.Legacy.Gen.u1Present)
3759 {
3760 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3761 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3762 }
3763
3764 /* Check that the new RIP is canonical. */
3765 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3766 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3767 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3768 if (!IEM_IS_CANONICAL(uNewRip))
3769 {
3770 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3771 return iemRaiseGeneralProtectionFault0(pIemCpu);
3772 }
3773
3774 /*
3775 * If the privilege level changes or if the IST isn't zero, we need to get
3776 * a new stack from the TSS.
3777 */
3778 uint64_t uNewRsp;
3779 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3780 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3781 if ( uNewCpl != pIemCpu->uCpl
3782 || Idte.Gate.u3IST != 0)
3783 {
3784 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787 }
3788 else
3789 uNewRsp = pCtx->rsp;
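    /* The stack is aligned to a 16 byte boundary before the 64-bit interrupt frame is
       pushed, whether or not a stack switch takes place. */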
3790 uNewRsp &= ~(uint64_t)0xf;
3791
3792 /*
3793 * Calc the flag image to push.
3794 */
3795 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3796 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3797 fEfl &= ~X86_EFL_RF;
3798 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3799 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3800
3801 /*
3802 * Start making changes.
3803 */
3804
3805 /* Create the stack frame. */
3806 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
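    /* SS, RSP, RFLAGS, CS and RIP (5 qwords) are always pushed in 64-bit mode, plus the
       error code when applicable. */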
3807 RTPTRUNION uStackFrame;
3808 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3809 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3810 if (rcStrict != VINF_SUCCESS)
3811 return rcStrict;
3812 void * const pvStackFrame = uStackFrame.pv;
3813
3814 if (fFlags & IEM_XCPT_FLAGS_ERR)
3815 *uStackFrame.pu64++ = uErr;
3816 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3817 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3818 uStackFrame.pu64[2] = fEfl;
3819 uStackFrame.pu64[3] = pCtx->rsp;
3820 uStackFrame.pu64[4] = pCtx->ss.Sel;
3821 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3822 if (rcStrict != VINF_SUCCESS)
3823 return rcStrict;
3824
3825    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3826    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3827 * after pushing the stack frame? (Write protect the gdt + stack to
3828 * find out.) */
3829 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3830 {
3831 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3832 if (rcStrict != VINF_SUCCESS)
3833 return rcStrict;
3834 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3835 }
3836
3837 /*
3838     * Start committing the register changes.
3839 */
3840    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3841 * hidden registers when interrupting 32-bit or 16-bit code! */
3842 if (uNewCpl != pIemCpu->uCpl)
3843 {
3844 pCtx->ss.Sel = 0 | uNewCpl;
3845 pCtx->ss.ValidSel = 0 | uNewCpl;
3846 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3847 pCtx->ss.u32Limit = UINT32_MAX;
3848 pCtx->ss.u64Base = 0;
3849 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3850 }
3851 pCtx->rsp = uNewRsp - cbStackFrame;
3852 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3853 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3854 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3855 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3856 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3857 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3858 pCtx->rip = uNewRip;
3859 pIemCpu->uCpl = uNewCpl;
3860
3861 fEfl &= ~fEflToClear;
3862 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3863
3864 if (fFlags & IEM_XCPT_FLAGS_CR2)
3865 pCtx->cr2 = uCr2;
3866
3867 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3868 iemRaiseXcptAdjustState(pCtx, u8Vector);
3869
3870 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3871}
3872
3873
3874/**
3875 * Implements exceptions and interrupts.
3876 *
3877 * All exceptions and interrupts go through this function!
3878 *
3879 * @returns VBox strict status code.
3880 * @param pIemCpu The IEM per CPU instance data.
3881 * @param cbInstr The number of bytes to offset rIP by in the return
3882 * address.
3883 * @param u8Vector The interrupt / exception vector number.
3884 * @param fFlags The flags.
3885 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3886 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3887 */
3888DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3889iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3890 uint8_t cbInstr,
3891 uint8_t u8Vector,
3892 uint32_t fFlags,
3893 uint16_t uErr,
3894 uint64_t uCr2)
3895{
3896 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3897#ifdef IN_RING0
3898 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3899 AssertRCReturn(rc, rc);
3900#endif
3901
3902 /*
3903 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3904 */
3905 if ( pCtx->eflags.Bits.u1VM
3906 && pCtx->eflags.Bits.u2IOPL != 3
3907 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3908 && (pCtx->cr0 & X86_CR0_PE) )
3909 {
3910 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3911 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3912 u8Vector = X86_XCPT_GP;
3913 uErr = 0;
3914 }
3915#ifdef DBGFTRACE_ENABLED
3916 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3917 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3918 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3919#endif
3920
3921 /*
3922 * Do recursion accounting.
3923 */
3924 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3925 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3926 if (pIemCpu->cXcptRecursions == 0)
3927 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3928 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3929 else
3930 {
3931 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3932 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3933
3934        /** @todo double and triple faults. */
3935 if (pIemCpu->cXcptRecursions >= 3)
3936 {
3937#ifdef DEBUG_bird
3938 AssertFailed();
3939#endif
3940 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3941 }
3942
3943 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3944 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3945 {
3946 ....
3947 } */
3948 }
3949 pIemCpu->cXcptRecursions++;
3950 pIemCpu->uCurXcpt = u8Vector;
3951 pIemCpu->fCurXcpt = fFlags;
3952
3953 /*
3954 * Extensive logging.
3955 */
3956#if defined(LOG_ENABLED) && defined(IN_RING3)
3957 if (LogIs3Enabled())
3958 {
3959 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3960 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3961 char szRegs[4096];
3962 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3963 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3964 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3965 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3966 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3967 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3968 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3969 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3970 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3971 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3972 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3973 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3974 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3975 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3976 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3977 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3978 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3979 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3980 " efer=%016VR{efer}\n"
3981 " pat=%016VR{pat}\n"
3982 " sf_mask=%016VR{sf_mask}\n"
3983 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3984 " lstar=%016VR{lstar}\n"
3985 " star=%016VR{star} cstar=%016VR{cstar}\n"
3986 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3987 );
3988
3989 char szInstr[256];
3990 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3991 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3992 szInstr, sizeof(szInstr), NULL);
3993 Log3(("%s%s\n", szRegs, szInstr));
3994 }
3995#endif /* LOG_ENABLED */
3996
3997 /*
3998 * Call the mode specific worker function.
3999 */
4000 VBOXSTRICTRC rcStrict;
4001 if (!(pCtx->cr0 & X86_CR0_PE))
4002 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4003 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4004 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4005 else
4006 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4007
4008 /*
4009 * Unwind.
4010 */
4011 pIemCpu->cXcptRecursions--;
4012 pIemCpu->uCurXcpt = uPrevXcpt;
4013 pIemCpu->fCurXcpt = fPrevXcpt;
4014 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4015 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4016 return rcStrict;
4017}
4018
4019
4020/** \#DE - 00. */
4021DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4022{
4023 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4024}
4025
4026
4027/** \#DB - 01.
4028 * @note   This automatically clears DR7.GD. */
4029DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4030{
4031 /** @todo set/clear RF. */
4032 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4033 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4034}
4035
4036
4037/** \#UD - 06. */
4038DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4039{
4040 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4041}
4042
4043
4044/** \#NM - 07. */
4045DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4046{
4047 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4048}
4049
4050
4051/** \#TS(err) - 0a. */
4052DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4053{
4054 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4055}
4056
4057
4058/** \#TS(tr) - 0a. */
4059DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4060{
4061 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4062 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4063}
4064
4065
4066/** \#TS(0) - 0a. */
4067DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4068{
4069 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4070 0, 0);
4071}
4072
4073
4074/** \#TS(err) - 0a. */
4075DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4076{
4077 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4078 uSel & X86_SEL_MASK_OFF_RPL, 0);
4079}
4080
4081
4082/** \#NP(err) - 0b. */
4083DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4084{
4085 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4086}
4087
4088
4089/** \#NP(seg) - 0b. */
4090DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4091{
4092 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4093 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4094}
4095
4096
4097/** \#NP(sel) - 0b. */
4098DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4099{
4100 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4101 uSel & ~X86_SEL_RPL, 0);
4102}
4103
4104
4105/** \#SS(seg) - 0c. */
4106DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4107{
4108 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4109 uSel & ~X86_SEL_RPL, 0);
4110}
4111
4112
4113/** \#SS(err) - 0c. */
4114DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4115{
4116 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4117}
4118
4119
4120/** \#GP(n) - 0d. */
4121DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4122{
4123 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4124}
4125
4126
4127/** \#GP(0) - 0d. */
4128DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4129{
4130 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4131}
4132
4133
4134/** \#GP(sel) - 0d. */
4135DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4136{
4137 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4138 Sel & ~X86_SEL_RPL, 0);
4139}
4140
4141
4142/** \#GP(0) - 0d. */
4143DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4144{
4145 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4146}
4147
4148
4149/** \#GP(sel) - 0d. */
4150DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4151{
4152 NOREF(iSegReg); NOREF(fAccess);
4153 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4154 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4155}
4156
4157
4158/** \#GP(sel) - 0d. */
4159DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4160{
4161 NOREF(Sel);
4162 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4163}
4164
4165
4166/** \#GP(sel) - 0d. */
4167DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4168{
4169 NOREF(iSegReg); NOREF(fAccess);
4170 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4171}
4172
4173
4174/** \#PF(n) - 0e. */
4175DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4176{
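    /* Assemble the page fault error code: P (bit 0), R/W (bit 1), U/S (bit 2) and
       I/D (bit 4); the RSVD bit isn't reported here, see the @todo below. */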
4177 uint16_t uErr;
4178 switch (rc)
4179 {
4180 case VERR_PAGE_NOT_PRESENT:
4181 case VERR_PAGE_TABLE_NOT_PRESENT:
4182 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4183 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4184 uErr = 0;
4185 break;
4186
4187 default:
4188 AssertMsgFailed(("%Rrc\n", rc));
4189 case VERR_ACCESS_DENIED:
4190 uErr = X86_TRAP_PF_P;
4191 break;
4192
4193 /** @todo reserved */
4194 }
4195
4196 if (pIemCpu->uCpl == 3)
4197 uErr |= X86_TRAP_PF_US;
4198
4199 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4200 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4201 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4202 uErr |= X86_TRAP_PF_ID;
4203
4204#if 0 /* This is so much nonsense, really. Why was it done like that? */
4205 /* Note! RW access callers reporting a WRITE protection fault, will clear
4206 the READ flag before calling. So, read-modify-write accesses (RW)
4207 can safely be reported as READ faults. */
4208 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4209 uErr |= X86_TRAP_PF_RW;
4210#else
4211 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4212 {
4213 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4214 uErr |= X86_TRAP_PF_RW;
4215 }
4216#endif
4217
4218 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4219 uErr, GCPtrWhere);
4220}
4221
4222
4223/** \#MF(0) - 10. */
4224DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4225{
4226 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4227}
4228
4229
4230/** \#AC(0) - 11. */
4231DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4232{
4233 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4234}
4235
4236
4237/**
4238 * Macro for calling iemCImplRaiseDivideError().
4239 *
4240 * This enables us to add/remove arguments and force different levels of
4241 * inlining as we wish.
4242 *
4243 * @return Strict VBox status code.
4244 */
4245#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4246IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4247{
4248 NOREF(cbInstr);
4249 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4250}
4251
4252
4253/**
4254 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4255 *
4256 * This enables us to add/remove arguments and force different levels of
4257 * inlining as we wish.
4258 *
4259 * @return Strict VBox status code.
4260 */
4261#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4262IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4263{
4264 NOREF(cbInstr);
4265 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4266}
4267
4268
4269/**
4270 * Macro for calling iemCImplRaiseInvalidOpcode().
4271 *
4272 * This enables us to add/remove arguments and force different levels of
4273 * inlining as we wish.
4274 *
4275 * @return Strict VBox status code.
4276 */
4277#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4278IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4279{
4280 NOREF(cbInstr);
4281 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4282}
4283
4284
4285/** @} */
4286
4287
4288/*
4289 *
4290 * Helper routines.
4291 * Helper routines.
4292 * Helper routines.
4293 *
4294 */
4295
4296/**
4297 * Recalculates the effective operand size.
4298 *
4299 * @param pIemCpu The IEM state.
4300 */
4301IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4302{
4303 switch (pIemCpu->enmCpuMode)
4304 {
4305 case IEMMODE_16BIT:
4306 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4307 break;
4308 case IEMMODE_32BIT:
4309 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4310 break;
4311 case IEMMODE_64BIT:
4312 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4313 {
4314 case 0:
4315 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4316 break;
4317 case IEM_OP_PRF_SIZE_OP:
4318 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4319 break;
4320 case IEM_OP_PRF_SIZE_REX_W:
4321 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
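                /* REX.W selects 64-bit operands and takes precedence over the 66h prefix. */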
4322 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4323 break;
4324 }
4325 break;
4326 default:
4327 AssertFailed();
4328 }
4329}
4330
4331
4332/**
4333 * Sets the default operand size to 64-bit and recalculates the effective
4334 * operand size.
4335 *
4336 * @param pIemCpu The IEM state.
4337 */
4338IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4339{
4340 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
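    /* Instructions using this (near branches, PUSH/POP and the like) default to 64-bit
       operands in 64-bit mode; a lone 66h prefix selects 16-bit, 32-bit can't be encoded. */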
4341 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4342 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4343 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4344 else
4345 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4346}
4347
4348
4349/*
4350 *
4351 * Common opcode decoders.
4352 * Common opcode decoders.
4353 * Common opcode decoders.
4354 *
4355 */
4356//#include <iprt/mem.h>
4357
4358/**
4359 * Used to add extra details about a stub case.
4360 * @param pIemCpu The IEM per CPU state.
4361 */
4362IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4363{
4364#if defined(LOG_ENABLED) && defined(IN_RING3)
4365 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4366 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4367 char szRegs[4096];
4368 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4369 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4370 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4371 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4372 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4373 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4374 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4375 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4376 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4377 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4378 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4379 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4380 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4381 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4382 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4383 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4384 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4385 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4386 " efer=%016VR{efer}\n"
4387 " pat=%016VR{pat}\n"
4388 " sf_mask=%016VR{sf_mask}\n"
4389 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4390 " lstar=%016VR{lstar}\n"
4391 " star=%016VR{star} cstar=%016VR{cstar}\n"
4392 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4393 );
4394
4395 char szInstr[256];
4396 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4397 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4398 szInstr, sizeof(szInstr), NULL);
4399
4400 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4401#else
4402 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);
4403#endif
4404}
4405
4406/**
4407 * Complains about a stub.
4408 *
4409 * Providing two versions of this macro, one for daily use and one for use when
4410 * working on IEM.
4411 */
4412#if 0
4413# define IEMOP_BITCH_ABOUT_STUB() \
4414 do { \
4415 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4416 iemOpStubMsg2(pIemCpu); \
4417 RTAssertPanic(); \
4418 } while (0)
4419#else
4420# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4421#endif
4422
4423/** Stubs an opcode. */
4424#define FNIEMOP_STUB(a_Name) \
4425 FNIEMOP_DEF(a_Name) \
4426 { \
4427 IEMOP_BITCH_ABOUT_STUB(); \
4428 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4429 } \
4430 typedef int ignore_semicolon
4431
4432/** Stubs an opcode. */
4433#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4434 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4435 { \
4436 IEMOP_BITCH_ABOUT_STUB(); \
4437 NOREF(a_Name0); \
4438 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4439 } \
4440 typedef int ignore_semicolon
4441
4442/** Stubs an opcode which currently should raise \#UD. */
4443#define FNIEMOP_UD_STUB(a_Name) \
4444 FNIEMOP_DEF(a_Name) \
4445 { \
4446 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4447 return IEMOP_RAISE_INVALID_OPCODE(); \
4448 } \
4449 typedef int ignore_semicolon
4450
4451/** Stubs an opcode which currently should raise \#UD. */
4452#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4453 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4454 { \
4455 NOREF(a_Name0); \
4456 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4457 return IEMOP_RAISE_INVALID_OPCODE(); \
4458 } \
4459 typedef int ignore_semicolon
4460
4461
4462
4463/** @name Register Access.
4464 * @{
4465 */
4466
4467/**
4468 * Gets a reference (pointer) to the specified hidden segment register.
4469 *
4470 * @returns Hidden register reference.
4471 * @param pIemCpu The per CPU data.
4472 * @param iSegReg The segment register.
4473 */
4474IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4475{
4476 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4477 PCPUMSELREG pSReg;
4478 switch (iSegReg)
4479 {
4480 case X86_SREG_ES: pSReg = &pCtx->es; break;
4481 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4482 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4483 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4484 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4485 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4486 default:
4487 AssertFailedReturn(NULL);
4488 }
4489#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4490 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4491 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4492#else
4493 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4494#endif
4495 return pSReg;
4496}
4497
4498
4499/**
4500 * Gets a reference (pointer) to the specified segment register (the selector
4501 * value).
4502 *
4503 * @returns Pointer to the selector variable.
4504 * @param pIemCpu The per CPU data.
4505 * @param iSegReg The segment register.
4506 */
4507IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4508{
4509 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4510 switch (iSegReg)
4511 {
4512 case X86_SREG_ES: return &pCtx->es.Sel;
4513 case X86_SREG_CS: return &pCtx->cs.Sel;
4514 case X86_SREG_SS: return &pCtx->ss.Sel;
4515 case X86_SREG_DS: return &pCtx->ds.Sel;
4516 case X86_SREG_FS: return &pCtx->fs.Sel;
4517 case X86_SREG_GS: return &pCtx->gs.Sel;
4518 }
4519 AssertFailedReturn(NULL);
4520}
4521
4522
4523/**
4524 * Fetches the selector value of a segment register.
4525 *
4526 * @returns The selector value.
4527 * @param pIemCpu The per CPU data.
4528 * @param iSegReg The segment register.
4529 */
4530IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4531{
4532 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4533 switch (iSegReg)
4534 {
4535 case X86_SREG_ES: return pCtx->es.Sel;
4536 case X86_SREG_CS: return pCtx->cs.Sel;
4537 case X86_SREG_SS: return pCtx->ss.Sel;
4538 case X86_SREG_DS: return pCtx->ds.Sel;
4539 case X86_SREG_FS: return pCtx->fs.Sel;
4540 case X86_SREG_GS: return pCtx->gs.Sel;
4541 }
4542 AssertFailedReturn(0xffff);
4543}
4544
4545
4546/**
4547 * Gets a reference (pointer) to the specified general register.
4548 *
4549 * @returns Register reference.
4550 * @param pIemCpu The per CPU data.
4551 * @param iReg The general register.
4552 */
4553IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4554{
4555 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4556 switch (iReg)
4557 {
4558 case X86_GREG_xAX: return &pCtx->rax;
4559 case X86_GREG_xCX: return &pCtx->rcx;
4560 case X86_GREG_xDX: return &pCtx->rdx;
4561 case X86_GREG_xBX: return &pCtx->rbx;
4562 case X86_GREG_xSP: return &pCtx->rsp;
4563 case X86_GREG_xBP: return &pCtx->rbp;
4564 case X86_GREG_xSI: return &pCtx->rsi;
4565 case X86_GREG_xDI: return &pCtx->rdi;
4566 case X86_GREG_x8: return &pCtx->r8;
4567 case X86_GREG_x9: return &pCtx->r9;
4568 case X86_GREG_x10: return &pCtx->r10;
4569 case X86_GREG_x11: return &pCtx->r11;
4570 case X86_GREG_x12: return &pCtx->r12;
4571 case X86_GREG_x13: return &pCtx->r13;
4572 case X86_GREG_x14: return &pCtx->r14;
4573 case X86_GREG_x15: return &pCtx->r15;
4574 }
4575 AssertFailedReturn(NULL);
4576}
4577
4578
4579/**
4580 * Gets a reference (pointer) to the specified 8-bit general register.
4581 *
4582 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4583 *
4584 * @returns Register reference.
4585 * @param pIemCpu The per CPU data.
4586 * @param iReg The register.
4587 */
4588IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4589{
4590 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4591 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4592
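    /* Without a REX prefix, encodings 4 thru 7 address AH, CH, DH and BH, i.e. the high
       byte of registers 0 thru 3. */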
4593 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4594 if (iReg >= 4)
4595 pu8Reg++;
4596 return pu8Reg;
4597}
4598
4599
4600/**
4601 * Fetches the value of an 8-bit general register.
4602 *
4603 * @returns The register value.
4604 * @param pIemCpu The per CPU data.
4605 * @param iReg The register.
4606 */
4607IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4608{
4609 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4610 return *pbSrc;
4611}
4612
4613
4614/**
4615 * Fetches the value of a 16-bit general register.
4616 *
4617 * @returns The register value.
4618 * @param pIemCpu The per CPU data.
4619 * @param iReg The register.
4620 */
4621IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4622{
4623 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4624}
4625
4626
4627/**
4628 * Fetches the value of a 32-bit general register.
4629 *
4630 * @returns The register value.
4631 * @param pIemCpu The per CPU data.
4632 * @param iReg The register.
4633 */
4634IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4635{
4636 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4637}
4638
4639
4640/**
4641 * Fetches the value of a 64-bit general register.
4642 *
4643 * @returns The register value.
4644 * @param pIemCpu The per CPU data.
4645 * @param iReg The register.
4646 */
4647IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4648{
4649 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4650}
4651
4652
4653/**
4654 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4655 *
4656 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4657 * segment limit.
4658 *
 * @returns Strict VBox status code.
4659 * @param   pIemCpu             The per CPU data.
4660 * @param offNextInstr The offset of the next instruction.
4661 */
4662IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4663{
4664 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4665 switch (pIemCpu->enmEffOpSize)
4666 {
4667 case IEMMODE_16BIT:
4668 {
4669 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4670 if ( uNewIp > pCtx->cs.u32Limit
4671 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4672 return iemRaiseGeneralProtectionFault0(pIemCpu);
4673 pCtx->rip = uNewIp;
4674 break;
4675 }
4676
4677 case IEMMODE_32BIT:
4678 {
4679 Assert(pCtx->rip <= UINT32_MAX);
4680 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4681
4682 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4683 if (uNewEip > pCtx->cs.u32Limit)
4684 return iemRaiseGeneralProtectionFault0(pIemCpu);
4685 pCtx->rip = uNewEip;
4686 break;
4687 }
4688
4689 case IEMMODE_64BIT:
4690 {
4691 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4692
4693 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4694 if (!IEM_IS_CANONICAL(uNewRip))
4695 return iemRaiseGeneralProtectionFault0(pIemCpu);
4696 pCtx->rip = uNewRip;
4697 break;
4698 }
4699
4700 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4701 }
4702
4703 pCtx->eflags.Bits.u1RF = 0;
4704 return VINF_SUCCESS;
4705}
4706
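/*
 * Illustrative sketch (added for exposition): in 16-bit operand size the target
 * above is formed with plain uint16_t arithmetic, so it wraps within the 64K
 * segment before being checked against CS.limit.  The exampleCalcNewIp16 name
 * is hypothetical, and the 2-byte instruction length in the asserts is merely
 * an assumption for a short JMP rel8.
 */
#if 0 /* standalone C99 sketch, never built */
# include <assert.h>
# include <stdint.h>

static uint16_t exampleCalcNewIp16(uint16_t uIp, uint8_t cbInstr, int8_t offRel)
{
    /* IP of the next instruction plus the signed displacement, modulo 64K. */
    return (uint16_t)(uIp + cbInstr + offRel);
}

static void exampleJumpWrap(void)
{
    assert(exampleCalcNewIp16(0x0008, 2, -16) == 0xFFFA);   /* wraps below zero */
    assert(exampleCalcNewIp16(0xFFFE, 2,  +4) == 0x0004);   /* wraps past 64K   */
}
#endif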
4707
4708/**
4709 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4710 *
4711 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4712 * segment limit.
4713 *
4714 * @returns Strict VBox status code.
4715 * @param pIemCpu The per CPU data.
4716 * @param offNextInstr The offset of the next instruction.
4717 */
4718IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4719{
4720 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4721 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4722
4723 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4724 if ( uNewIp > pCtx->cs.u32Limit
4725 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4726 return iemRaiseGeneralProtectionFault0(pIemCpu);
4727 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4728 pCtx->rip = uNewIp;
4729 pCtx->eflags.Bits.u1RF = 0;
4730
4731 return VINF_SUCCESS;
4732}
4733
4734
4735/**
4736 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4737 *
4738 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4739 * segment limit.
4740 *
4741 * @returns Strict VBox status code.
4742 * @param pIemCpu The per CPU data.
4743 * @param offNextInstr The offset of the next instruction.
4744 */
4745IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4746{
4747 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4748 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4749
4750 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4751 {
4752 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4753
4754 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4755 if (uNewEip > pCtx->cs.u32Limit)
4756 return iemRaiseGeneralProtectionFault0(pIemCpu);
4757 pCtx->rip = uNewEip;
4758 }
4759 else
4760 {
4761 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4762
4763 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4764 if (!IEM_IS_CANONICAL(uNewRip))
4765 return iemRaiseGeneralProtectionFault0(pIemCpu);
4766 pCtx->rip = uNewRip;
4767 }
4768 pCtx->eflags.Bits.u1RF = 0;
4769 return VINF_SUCCESS;
4770}
4771
4772
4773/**
4774 * Performs a near jump to the specified address.
4775 *
4776 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4777 * segment limit.
4778 *
4779 * @param pIemCpu The per CPU data.
4780 * @param uNewRip The new RIP value.
4781 */
4782IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4783{
4784 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4785 switch (pIemCpu->enmEffOpSize)
4786 {
4787 case IEMMODE_16BIT:
4788 {
4789 Assert(uNewRip <= UINT16_MAX);
4790 if ( uNewRip > pCtx->cs.u32Limit
4791 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4792 return iemRaiseGeneralProtectionFault0(pIemCpu);
4793 /** @todo Test 16-bit jump in 64-bit mode. */
4794 pCtx->rip = uNewRip;
4795 break;
4796 }
4797
4798 case IEMMODE_32BIT:
4799 {
4800 Assert(uNewRip <= UINT32_MAX);
4801 Assert(pCtx->rip <= UINT32_MAX);
4802 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4803
4804 if (uNewRip > pCtx->cs.u32Limit)
4805 return iemRaiseGeneralProtectionFault0(pIemCpu);
4806 pCtx->rip = uNewRip;
4807 break;
4808 }
4809
4810 case IEMMODE_64BIT:
4811 {
4812 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4813
4814 if (!IEM_IS_CANONICAL(uNewRip))
4815 return iemRaiseGeneralProtectionFault0(pIemCpu);
4816 pCtx->rip = uNewRip;
4817 break;
4818 }
4819
4820 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4821 }
4822
4823 pCtx->eflags.Bits.u1RF = 0;
4824 return VINF_SUCCESS;
4825}
4826
4827
4828/**
4829 * Gets the address of the top of the stack.
4830 *
4831 * @param pIemCpu The per CPU data.
4832 * @param pCtx The CPU context whose SP/ESP/RSP should be
4833 * read.
4834 */
4835DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4836{
4837 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4838 return pCtx->rsp;
4839 if (pCtx->ss.Attr.n.u1DefBig)
4840 return pCtx->esp;
4841 return pCtx->sp;
4842}
4843
4844
4845/**
4846 * Updates the RIP/EIP/IP to point to the next instruction.
4847 *
4848 * This function leaves the EFLAGS.RF flag alone.
4849 *
4850 * @param pIemCpu The per CPU data.
4851 * @param cbInstr The number of bytes to add.
4852 */
4853IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4854{
4855 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4856 switch (pIemCpu->enmCpuMode)
4857 {
4858 case IEMMODE_16BIT:
4859 Assert(pCtx->rip <= UINT16_MAX);
4860 pCtx->eip += cbInstr;
4861 pCtx->eip &= UINT32_C(0xffff);
4862 break;
4863
4864 case IEMMODE_32BIT:
4865 pCtx->eip += cbInstr;
4866 Assert(pCtx->rip <= UINT32_MAX);
4867 break;
4868
4869 case IEMMODE_64BIT:
4870 pCtx->rip += cbInstr;
4871 break;
4872 default: AssertFailed();
4873 }
4874}
4875
4876
4877#if 0
4878/**
4879 * Updates the RIP/EIP/IP to point to the next instruction.
4880 *
4881 * @param pIemCpu The per CPU data.
4882 */
4883IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4884{
4885 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4886}
4887#endif
4888
4889
4890
4891/**
4892 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4893 *
4894 * @param pIemCpu The per CPU data.
4895 * @param cbInstr The number of bytes to add.
4896 */
4897IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4898{
4899 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4900
4901 pCtx->eflags.Bits.u1RF = 0;
4902
4903 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4904 switch (pIemCpu->enmCpuMode)
4905 {
4906 /** @todo investigate if EIP or RIP is really incremented. */
4907 case IEMMODE_16BIT:
4908 case IEMMODE_32BIT:
4909 pCtx->eip += cbInstr;
4910 Assert(pCtx->rip <= UINT32_MAX);
4911 break;
4912
4913 case IEMMODE_64BIT:
4914 pCtx->rip += cbInstr;
4915 break;
4916 default: AssertFailed();
4917 }
4918}
4919
4920
4921/**
4922 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4923 *
4924 * @param pIemCpu The per CPU data.
4925 */
4926IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4927{
4928 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4929}
4930
4931
4932/**
4933 * Adds to the stack pointer.
4934 *
4935 * @param pIemCpu The per CPU data.
4936 * @param pCtx The CPU context whose SP/ESP/RSP should be
4937 * updated.
4938 * @param cbToAdd The number of bytes to add.
4939 */
4940DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4941{
4942 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4943 pCtx->rsp += cbToAdd;
4944 else if (pCtx->ss.Attr.n.u1DefBig)
4945 pCtx->esp += cbToAdd;
4946 else
4947 pCtx->sp += cbToAdd;
4948}
4949
4950
4951/**
4952 * Subtracts from the stack pointer.
4953 *
4954 * @param pIemCpu The per CPU data.
4955 * @param pCtx The CPU context whose SP/ESP/RSP should be
4956 * updated.
4957 * @param cbToSub The number of bytes to subtract.
4958 */
4959DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4960{
4961 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4962 pCtx->rsp -= cbToSub;
4963 else if (pCtx->ss.Attr.n.u1DefBig)
4964 pCtx->esp -= cbToSub;
4965 else
4966 pCtx->sp -= cbToSub;
4967}
4968
4969
4970/**
4971 * Adds to the temporary stack pointer.
4972 *
4973 * @param pIemCpu The per CPU data.
4974 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4975 * @param cbToAdd The number of bytes to add.
4976 * @param pCtx Where to get the current stack mode.
4977 */
4978DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4979{
4980 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4981 pTmpRsp->u += cbToAdd;
4982 else if (pCtx->ss.Attr.n.u1DefBig)
4983 pTmpRsp->DWords.dw0 += cbToAdd;
4984 else
4985 pTmpRsp->Words.w0 += cbToAdd;
4986}
4987
4988
4989/**
4990 * Subtracts from the temporary stack pointer.
4991 *
4992 * @param pIemCpu The per CPU data.
4993 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4994 * @param cbToSub The number of bytes to subtract.
4995 * @param pCtx Where to get the current stack mode.
4996 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
4997 * expecting that.
4998 */
4999DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5000{
5001 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5002 pTmpRsp->u -= cbToSub;
5003 else if (pCtx->ss.Attr.n.u1DefBig)
5004 pTmpRsp->DWords.dw0 -= cbToSub;
5005 else
5006 pTmpRsp->Words.w0 -= cbToSub;
5007}
5008
5009
5010/**
5011 * Calculates the effective stack address for a push of the specified size as
5012 * well as the new RSP value (upper bits may be masked).
5013 *
5014 * @returns Effective stack address for the push.
5015 * @param pIemCpu The IEM per CPU data.
5016 * @param pCtx Where to get the current stack mode.
5017 * @param cbItem The size of the stack item to push.
5018 * @param puNewRsp Where to return the new RSP value.
5019 */
5020DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5021{
5022 RTUINT64U uTmpRsp;
5023 RTGCPTR GCPtrTop;
5024 uTmpRsp.u = pCtx->rsp;
5025
5026 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5027 GCPtrTop = uTmpRsp.u -= cbItem;
5028 else if (pCtx->ss.Attr.n.u1DefBig)
5029 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5030 else
5031 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5032 *puNewRsp = uTmpRsp.u;
5033 return GCPtrTop;
5034}
5035
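/*
 * Illustrative sketch (added for exposition): with a 16-bit stack (SS.B clear)
 * only the low word of RSP is decremented and used as the push address, while
 * the untouched upper bits are carried over into the returned new RSP value,
 * mirroring the Words.w0 arithmetic above.  exampleRspForPush16 is a
 * hypothetical name.
 */
#if 0 /* standalone C99 sketch, never built */
# include <assert.h>
# include <stdint.h>

static uint64_t exampleRspForPush16(uint64_t uRsp, uint8_t cbItem, uint64_t *puNewRsp)
{
    uint16_t uSp = (uint16_t)uRsp;
    uSp         -= cbItem;                                  /* wraps inside the 64K stack    */
    *puNewRsp    = (uRsp & ~(uint64_t)0xffff) | uSp;        /* upper bits are left untouched */
    return uSp;                                             /* effective address (SS offset) */
}

static void examplePush16(void)
{
    uint64_t uNewRsp;
    assert(exampleRspForPush16(UINT64_C(0x10000), 2, &uNewRsp) == 0xFFFE);  /* SP=0 wraps */
    assert(uNewRsp == UINT64_C(0x1FFFE));
}
#endif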
5036
5037/**
5038 * Gets the current stack pointer and calculates the value after a pop of the
5039 * specified size.
5040 *
5041 * @returns Current stack pointer.
5042 * @param pIemCpu The per CPU data.
5043 * @param pCtx Where to get the current stack mode.
5044 * @param cbItem The size of the stack item to pop.
5045 * @param puNewRsp Where to return the new RSP value.
5046 */
5047DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5048{
5049 RTUINT64U uTmpRsp;
5050 RTGCPTR GCPtrTop;
5051 uTmpRsp.u = pCtx->rsp;
5052
5053 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5054 {
5055 GCPtrTop = uTmpRsp.u;
5056 uTmpRsp.u += cbItem;
5057 }
5058 else if (pCtx->ss.Attr.n.u1DefBig)
5059 {
5060 GCPtrTop = uTmpRsp.DWords.dw0;
5061 uTmpRsp.DWords.dw0 += cbItem;
5062 }
5063 else
5064 {
5065 GCPtrTop = uTmpRsp.Words.w0;
5066 uTmpRsp.Words.w0 += cbItem;
5067 }
5068 *puNewRsp = uTmpRsp.u;
5069 return GCPtrTop;
5070}
5071
5072
5073/**
5074 * Calculates the effective stack address for a push of the specified size as
5075 * well as the new temporary RSP value (upper bits may be masked).
5076 *
5077 * @returns Effective stack address for the push.
5078 * @param pIemCpu The per CPU data.
5079 * @param pCtx Where to get the current stack mode.
5080 * @param pTmpRsp The temporary stack pointer. This is updated.
5081 * @param cbItem The size of the stack item to push.
5082 */
5083DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5084{
5085 RTGCPTR GCPtrTop;
5086
5087 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5088 GCPtrTop = pTmpRsp->u -= cbItem;
5089 else if (pCtx->ss.Attr.n.u1DefBig)
5090 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5091 else
5092 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5093 return GCPtrTop;
5094}
5095
5096
5097/**
5098 * Gets the effective stack address for a pop of the specified size and
5099 * calculates and updates the temporary RSP.
5100 *
5101 * @returns Current stack pointer.
5102 * @param pIemCpu The per CPU data.
5103 * @param pTmpRsp The temporary stack pointer. This is updated.
5104 * @param pCtx Where to get the current stack mode.
5105 * @param cbItem The size of the stack item to pop.
5106 */
5107DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5108{
5109 RTGCPTR GCPtrTop;
5110 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5111 {
5112 GCPtrTop = pTmpRsp->u;
5113 pTmpRsp->u += cbItem;
5114 }
5115 else if (pCtx->ss.Attr.n.u1DefBig)
5116 {
5117 GCPtrTop = pTmpRsp->DWords.dw0;
5118 pTmpRsp->DWords.dw0 += cbItem;
5119 }
5120 else
5121 {
5122 GCPtrTop = pTmpRsp->Words.w0;
5123 pTmpRsp->Words.w0 += cbItem;
5124 }
5125 return GCPtrTop;
5126}
5127
5128/** @} */
5129
5130
5131/** @name FPU access and helpers.
5132 *
5133 * @{
5134 */
5135
5136
5137/**
5138 * Hook for preparing to use the host FPU.
5139 *
5140 * This is necessary in ring-0 and raw-mode context.
5141 *
5142 * @param pIemCpu The IEM per CPU data.
5143 */
5144DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5145{
5146#ifdef IN_RING3
5147 NOREF(pIemCpu);
5148#else
5149/** @todo RZ: FIXME */
5150//# error "Implement me"
5151#endif
5152}
5153
5154
5155/**
5156 * Hook for preparing to use the host FPU for SSE.
5157 *
5158 * This is necessary in ring-0 and raw-mode context.
5159 *
5160 * @param pIemCpu The IEM per CPU data.
5161 */
5162DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5163{
5164 iemFpuPrepareUsage(pIemCpu);
5165}
5166
5167
5168/**
5169 * Stores a QNaN value into a FPU register.
5170 *
5171 * @param pReg Pointer to the register.
5172 */
5173DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5174{
5175 pReg->au32[0] = UINT32_C(0x00000000);
5176 pReg->au32[1] = UINT32_C(0xc0000000);
5177 pReg->au16[4] = UINT16_C(0xffff);
5178}
5179
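/*
 * Illustrative sketch (added for exposition): the three stores above assemble
 * the 80-bit "real indefinite" QNaN - sign 1, exponent 0x7fff, and mantissa
 * 0xc000000000000000 (integer bit plus the top fraction bit).  The example
 * function name is hypothetical.
 */
#if 0 /* standalone C99 sketch, never built */
# include <assert.h>
# include <stdint.h>

static void exampleQNanLayout(void)
{
    uint16_t const uSignExp  = UINT16_C(0xffff);              /* au16[4] above    */
    uint64_t const uMantissa = UINT64_C(0xc000000000000000);  /* au32[0..1] above */

    assert((uSignExp >> 15) == 1);                            /* sign bit set        */
    assert((uSignExp & 0x7fff) == 0x7fff);                    /* all-ones exponent   */
    assert(uMantissa >> 63);                                  /* J (integer) bit set */
}
#endif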
5180
5181/**
5182 * Updates the FOP, FPU.CS and FPUIP registers.
5183 *
5184 * @param pIemCpu The IEM per CPU data.
5185 * @param pCtx The CPU context.
5186 * @param pFpuCtx The FPU context.
5187 */
5188DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5189{
5190 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5191 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5192 /** @todo x87.CS and FPUIP need to be kept separately. */
5193 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5194 {
5195 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5196 * happens in real mode here based on the fnsave and fnstenv images. */
5197 pFpuCtx->CS = 0;
5198 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5199 }
5200 else
5201 {
5202 pFpuCtx->CS = pCtx->cs.Sel;
5203 pFpuCtx->FPUIP = pCtx->rip;
5204 }
5205}
5206
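/*
 * Illustrative sketch (added for exposition): the worker above builds the
 * classic 11-bit FPU opcode (FOP) from the ModR/M byte in bits 0-7 and the low
 * three bits of the D8..DF escape byte in bits 8-10.  exampleCalcFop is a
 * hypothetical helper; the asserted encodings are the usual fadd st0,st1
 * (D8 C1) and fsqrt (D9 FA).
 */
#if 0 /* standalone C99 sketch, never built */
# include <assert.h>
# include <stdint.h>

static uint16_t exampleCalcFop(uint8_t bEscape, uint8_t bModRm)
{
    return (uint16_t)(bModRm | ((uint16_t)(bEscape & 0x7) << 8));
}

static void exampleFop(void)
{
    assert(exampleCalcFop(0xD8, 0xC1) == 0x0C1);   /* fadd st0,st1 */
    assert(exampleCalcFop(0xD9, 0xFA) == 0x1FA);   /* fsqrt        */
}
#endif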
5207
5208/**
5209 * Updates the x87.DS and FPUDP registers.
5210 *
5211 * @param pIemCpu The IEM per CPU data.
5212 * @param pCtx The CPU context.
5213 * @param pFpuCtx The FPU context.
5214 * @param iEffSeg The effective segment register.
5215 * @param GCPtrEff The effective address relative to @a iEffSeg.
5216 */
5217DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5218{
5219 RTSEL sel;
5220 switch (iEffSeg)
5221 {
5222 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5223 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5224 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5225 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5226 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5227 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5228 default:
5229 AssertMsgFailed(("%d\n", iEffSeg));
5230 sel = pCtx->ds.Sel;
5231 }
5232 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5233 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5234 {
5235 pFpuCtx->DS = 0;
5236 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5237 }
5238 else
5239 {
5240 pFpuCtx->DS = sel;
5241 pFpuCtx->FPUDP = GCPtrEff;
5242 }
5243}
5244
5245
5246/**
5247 * Rotates the stack registers in the push direction.
5248 *
5249 * @param pFpuCtx The FPU context.
5250 * @remarks This is a complete waste of time, but fxsave stores the registers in
5251 * stack order.
5252 */
5253DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5254{
5255 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5256 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5257 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5258 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5259 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5260 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5261 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5262 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5263 pFpuCtx->aRegs[0].r80 = r80Tmp;
5264}
5265
5266
5267/**
5268 * Rotates the stack registers in the pop direction.
5269 *
5270 * @param pFpuCtx The FPU context.
5271 * @remarks This is a complete waste of time, but fxsave stores the registers in
5272 * stack order.
5273 */
5274DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5275{
5276 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5277 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5278 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5279 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5280 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5281 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5282 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5283 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5284 pFpuCtx->aRegs[7].r80 = r80Tmp;
5285}
5286
5287
5288/**
5289 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5290 * exception prevents it.
5291 *
5292 * @param pIemCpu The IEM per CPU data.
5293 * @param pResult The FPU operation result to push.
5294 * @param pFpuCtx The FPU context.
5295 */
5296IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5297{
5298 /* Update FSW and bail if there are pending exceptions afterwards. */
5299 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5300 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5301 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5302 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5303 {
5304 pFpuCtx->FSW = fFsw;
5305 return;
5306 }
5307
5308 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5309 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5310 {
5311 /* All is fine, push the actual value. */
5312 pFpuCtx->FTW |= RT_BIT(iNewTop);
5313 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5314 }
5315 else if (pFpuCtx->FCW & X86_FCW_IM)
5316 {
5317 /* Masked stack overflow, push QNaN. */
5318 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5319 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5320 }
5321 else
5322 {
5323 /* Raise stack overflow, don't push anything. */
5324 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5325 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5326 return;
5327 }
5328
5329 fFsw &= ~X86_FSW_TOP_MASK;
5330 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5331 pFpuCtx->FSW = fFsw;
5332
5333 iemFpuRotateStackPush(pFpuCtx);
5334}
5335
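/*
 * Illustrative sketch (added for exposition): TOP lives in FSW bits 11-13, and
 * a push decrements it modulo 8, which the code above writes as (TOP + 7) & 7
 * to stay inside the 3-bit field.  The EX_FSW_* macros and the example function
 * are hypothetical stand-ins for the X86_FSW_TOP_* definitions.
 */
#if 0 /* standalone C99 sketch, never built */
# include <assert.h>
# include <stdint.h>

# define EX_FSW_TOP_SHIFT   11
# define EX_FSW_TOP_MASK    UINT16_C(0x3800)

static uint16_t exampleFswAfterPush(uint16_t uFsw)
{
    uint16_t const iTop    = (uint16_t)((uFsw & EX_FSW_TOP_MASK) >> EX_FSW_TOP_SHIFT);
    uint16_t const iNewTop = (uint16_t)((iTop + 7) & 7);             /* TOP-- (mod 8) */
    return (uint16_t)((uFsw & ~EX_FSW_TOP_MASK) | (iNewTop << EX_FSW_TOP_SHIFT));
}

static void exampleTopDec(void)
{
    assert(exampleFswAfterPush(0 << EX_FSW_TOP_SHIFT) == (7 << EX_FSW_TOP_SHIFT));  /* 0 -> 7 */
    assert(exampleFswAfterPush(3 << EX_FSW_TOP_SHIFT) == (2 << EX_FSW_TOP_SHIFT));  /* 3 -> 2 */
}
#endif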
5336
5337/**
5338 * Stores a result in a FPU register and updates the FSW and FTW.
5339 *
5340 * @param pFpuCtx The FPU context.
5341 * @param pResult The result to store.
5342 * @param iStReg Which FPU register to store it in.
5343 */
5344IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5345{
5346 Assert(iStReg < 8);
5347 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5348 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5349 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5350 pFpuCtx->FTW |= RT_BIT(iReg);
5351 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5352}
5353
5354
5355/**
5356 * Only updates the FPU status word (FSW) with the result of the current
5357 * instruction.
5358 *
5359 * @param pFpuCtx The FPU context.
5360 * @param u16FSW The FSW output of the current instruction.
5361 */
5362IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5363{
5364 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5365 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5366}
5367
5368
5369/**
5370 * Pops one item off the FPU stack if no pending exception prevents it.
5371 *
5372 * @param pFpuCtx The FPU context.
5373 */
5374IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5375{
5376 /* Check pending exceptions. */
5377 uint16_t uFSW = pFpuCtx->FSW;
5378 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5379 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5380 return;
5381
5382 /* TOP++ (a pop increments TOP). */
5383 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5384 uFSW &= ~X86_FSW_TOP_MASK;
5385 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5386 pFpuCtx->FSW = uFSW;
5387
5388 /* Mark the previous ST0 as empty. */
5389 iOldTop >>= X86_FSW_TOP_SHIFT;
5390 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5391
5392 /* Rotate the registers. */
5393 iemFpuRotateStackPop(pFpuCtx);
5394}
5395
5396
5397/**
5398 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5399 *
5400 * @param pIemCpu The IEM per CPU data.
5401 * @param pResult The FPU operation result to push.
5402 */
5403IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5404{
5405 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5406 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5407 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5408 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5409}
5410
5411
5412/**
5413 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5414 * and sets FPUDP and FPUDS.
5415 *
5416 * @param pIemCpu The IEM per CPU data.
5417 * @param pResult The FPU operation result to push.
5418 * @param iEffSeg The effective segment register.
5419 * @param GCPtrEff The effective address relative to @a iEffSeg.
5420 */
5421IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5422{
5423 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5424 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5425 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5426 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5427 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5428}
5429
5430
5431/**
5432 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5433 * unless a pending exception prevents it.
5434 *
5435 * @param pIemCpu The IEM per CPU data.
5436 * @param pResult The FPU operation result to store and push.
5437 */
5438IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5439{
5440 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5441 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5442 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5443
5444 /* Update FSW and bail if there are pending exceptions afterwards. */
5445 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5446 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5447 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5448 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5449 {
5450 pFpuCtx->FSW = fFsw;
5451 return;
5452 }
5453
5454 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5455 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5456 {
5457 /* All is fine, push the actual value. */
5458 pFpuCtx->FTW |= RT_BIT(iNewTop);
5459 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5460 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5461 }
5462 else if (pFpuCtx->FCW & X86_FCW_IM)
5463 {
5464 /* Masked stack overflow, push QNaN. */
5465 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5466 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5467 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5468 }
5469 else
5470 {
5471 /* Raise stack overflow, don't push anything. */
5472 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5473 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5474 return;
5475 }
5476
5477 fFsw &= ~X86_FSW_TOP_MASK;
5478 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5479 pFpuCtx->FSW = fFsw;
5480
5481 iemFpuRotateStackPush(pFpuCtx);
5482}
5483
5484
5485/**
5486 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5487 * FOP.
5488 *
5489 * @param pIemCpu The IEM per CPU data.
5490 * @param pResult The result to store.
5491 * @param iStReg Which FPU register to store it in.
5493 */
5494IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5495{
5496 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5497 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5498 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5499 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5500}
5501
5502
5503/**
5504 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5505 * FOP, and then pops the stack.
5506 *
5507 * @param pIemCpu The IEM per CPU data.
5508 * @param pResult The result to store.
5509 * @param iStReg Which FPU register to store it in.
5511 */
5512IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5513{
5514 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5515 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5516 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5517 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5518 iemFpuMaybePopOne(pFpuCtx);
5519}
5520
5521
5522/**
5523 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5524 * FPUDP, and FPUDS.
5525 *
5526 * @param pIemCpu The IEM per CPU data.
5527 * @param pResult The result to store.
5528 * @param iStReg Which FPU register to store it in.
5530 * @param iEffSeg The effective memory operand selector register.
5531 * @param GCPtrEff The effective memory operand offset.
5532 */
5533IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5534{
5535 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5536 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5537 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5538 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5539 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5540}
5541
5542
5543/**
5544 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5545 * FPUDP, and FPUDS, and then pops the stack.
5546 *
5547 * @param pIemCpu The IEM per CPU data.
5548 * @param pResult The result to store.
5549 * @param iStReg Which FPU register to store it in.
5551 * @param iEffSeg The effective memory operand selector register.
5552 * @param GCPtrEff The effective memory operand offset.
5553 */
5554IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5555 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5556{
5557 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5558 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5559 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5560 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5561 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5562 iemFpuMaybePopOne(pFpuCtx);
5563}
5564
5565
5566/**
5567 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5568 *
5569 * @param pIemCpu The IEM per CPU data.
5570 */
5571IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5572{
5573 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5574 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5575 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5576}
5577
5578
5579/**
5580 * Marks the specified stack register as free (for FFREE).
5581 *
5582 * @param pIemCpu The IEM per CPU data.
5583 * @param iStReg The register to free.
5584 */
5585IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5586{
5587 Assert(iStReg < 8);
5588 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5589 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5590 pFpuCtx->FTW &= ~RT_BIT(iReg);
5591}
5592
5593
5594/**
5595 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5596 *
5597 * @param pIemCpu The IEM per CPU data.
5598 */
5599IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5600{
5601 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5602 uint16_t uFsw = pFpuCtx->FSW;
5603 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5604 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5605 uFsw &= ~X86_FSW_TOP_MASK;
5606 uFsw |= uTop;
5607 pFpuCtx->FSW = uFsw;
5608}
5609
5610
5611/**
5612 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5613 *
5614 * @param pIemCpu The IEM per CPU data.
5615 */
5616IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5617{
5618 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5619 uint16_t uFsw = pFpuCtx->FSW;
5620 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5621 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5622 uFsw &= ~X86_FSW_TOP_MASK;
5623 uFsw |= uTop;
5624 pFpuCtx->FSW = uFsw;
5625}
5626
5627
5628/**
5629 * Updates the FSW, FOP, FPUIP, and FPUCS.
5630 *
5631 * @param pIemCpu The IEM per CPU data.
5632 * @param u16FSW The FSW from the current instruction.
5633 */
5634IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5635{
5636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5637 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5638 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5639 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5640}
5641
5642
5643/**
5644 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5645 *
5646 * @param pIemCpu The IEM per CPU data.
5647 * @param u16FSW The FSW from the current instruction.
5648 */
5649IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5650{
5651 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5652 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5653 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5654 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5655 iemFpuMaybePopOne(pFpuCtx);
5656}
5657
5658
5659/**
5660 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5661 *
5662 * @param pIemCpu The IEM per CPU data.
5663 * @param u16FSW The FSW from the current instruction.
5664 * @param iEffSeg The effective memory operand selector register.
5665 * @param GCPtrEff The effective memory operand offset.
5666 */
5667IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5668{
5669 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5670 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5671 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5672 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5673 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5674}
5675
5676
5677/**
5678 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5679 *
5680 * @param pIemCpu The IEM per CPU data.
5681 * @param u16FSW The FSW from the current instruction.
5682 */
5683IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5684{
5685 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5686 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5687 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5688 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5689 iemFpuMaybePopOne(pFpuCtx);
5690 iemFpuMaybePopOne(pFpuCtx);
5691}
5692
5693
5694/**
5695 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5696 *
5697 * @param pIemCpu The IEM per CPU data.
5698 * @param u16FSW The FSW from the current instruction.
5699 * @param iEffSeg The effective memory operand selector register.
5700 * @param GCPtrEff The effective memory operand offset.
5701 */
5702IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5703{
5704 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5705 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5706 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5707 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5708 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5709 iemFpuMaybePopOne(pFpuCtx);
5710}
5711
5712
5713/**
5714 * Worker routine for raising an FPU stack underflow exception.
5715 *
5716 * @param pIemCpu The IEM per CPU data.
5717 * @param pFpuCtx The FPU context.
5718 * @param iStReg The stack register being accessed.
5719 */
5720IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5721{
5722 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5723 if (pFpuCtx->FCW & X86_FCW_IM)
5724 {
5725 /* Masked underflow. */
5726 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5727 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5728 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5729 if (iStReg != UINT8_MAX)
5730 {
5731 pFpuCtx->FTW |= RT_BIT(iReg);
5732 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5733 }
5734 }
5735 else
5736 {
5737 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5738 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5739 }
5740}
5741
5742
5743/**
5744 * Raises a FPU stack underflow exception.
5745 *
5746 * @param pIemCpu The IEM per CPU data.
5747 * @param iStReg The destination register that should be loaded
5748 * with QNaN if \#IS is masked. Specify
5749 * UINT8_MAX if none (like for fcom).
5750 */
5751DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5752{
5753 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5754 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5755 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5756 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5757}
5758
5759
5760DECL_NO_INLINE(IEM_STATIC, void)
5761iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5762{
5763 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5764 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5765 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5766 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5767 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5768}
5769
5770
5771DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5772{
5773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5774 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5775 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5776 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5777 iemFpuMaybePopOne(pFpuCtx);
5778}
5779
5780
5781DECL_NO_INLINE(IEM_STATIC, void)
5782iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5783{
5784 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5785 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5786 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5787 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5788 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5789 iemFpuMaybePopOne(pFpuCtx);
5790}
5791
5792
5793DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5794{
5795 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5796 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5797 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5798 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5799 iemFpuMaybePopOne(pFpuCtx);
5800 iemFpuMaybePopOne(pFpuCtx);
5801}
5802
5803
5804DECL_NO_INLINE(IEM_STATIC, void)
5805iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5806{
5807 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5808 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5809 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5810
5811 if (pFpuCtx->FCW & X86_FCW_IM)
5812 {
5813 /* Masked underflow - Push QNaN. */
5814 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5815 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5816 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5817 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5818 pFpuCtx->FTW |= RT_BIT(iNewTop);
5819 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5820 iemFpuRotateStackPush(pFpuCtx);
5821 }
5822 else
5823 {
5824 /* Exception pending - don't change TOP or the register stack. */
5825 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5826 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5827 }
5828}
5829
5830
5831DECL_NO_INLINE(IEM_STATIC, void)
5832iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5833{
5834 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5835 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5836 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5837
5838 if (pFpuCtx->FCW & X86_FCW_IM)
5839 {
5840 /* Masked underflow - Push QNaN. */
5841 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5842 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5843 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5844 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5845 pFpuCtx->FTW |= RT_BIT(iNewTop);
5846 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5847 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5848 iemFpuRotateStackPush(pFpuCtx);
5849 }
5850 else
5851 {
5852 /* Exception pending - don't change TOP or the register stack. */
5853 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5854 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5855 }
5856}
5857
5858
5859/**
5860 * Worker routine for raising an FPU stack overflow exception on a push.
5861 *
5862 * @param pFpuCtx The FPU context.
5863 */
5864IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5865{
5866 if (pFpuCtx->FCW & X86_FCW_IM)
5867 {
5868 /* Masked overflow. */
5869 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5870 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5871 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5872 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5873 pFpuCtx->FTW |= RT_BIT(iNewTop);
5874 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5875 iemFpuRotateStackPush(pFpuCtx);
5876 }
5877 else
5878 {
5879 /* Exception pending - don't change TOP or the register stack. */
5880 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5881 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5882 }
5883}
5884
5885
5886/**
5887 * Raises a FPU stack overflow exception on a push.
5888 *
5889 * @param pIemCpu The IEM per CPU data.
5890 */
5891DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5892{
5893 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5894 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5895 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5896 iemFpuStackPushOverflowOnly(pFpuCtx);
5897}
5898
5899
5900/**
5901 * Raises a FPU stack overflow exception on a push with a memory operand.
5902 *
5903 * @param pIemCpu The IEM per CPU data.
5904 * @param iEffSeg The effective memory operand selector register.
5905 * @param GCPtrEff The effective memory operand offset.
5906 */
5907DECL_NO_INLINE(IEM_STATIC, void)
5908iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5909{
5910 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5911 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5912 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5913 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5914 iemFpuStackPushOverflowOnly(pFpuCtx);
5915}
5916
5917
5918IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5919{
5920 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5921 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5922 if (pFpuCtx->FTW & RT_BIT(iReg))
5923 return VINF_SUCCESS;
5924 return VERR_NOT_FOUND;
5925}
5926
5927
5928IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5929{
5930 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5931 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5932 if (pFpuCtx->FTW & RT_BIT(iReg))
5933 {
5934 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5935 return VINF_SUCCESS;
5936 }
5937 return VERR_NOT_FOUND;
5938}
5939
5940
5941IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5942 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5943{
5944 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5945 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5946 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5947 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5948 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5949 {
5950 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5951 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
5952 return VINF_SUCCESS;
5953 }
5954 return VERR_NOT_FOUND;
5955}
5956
5957
5958IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5959{
5960 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5961 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5962 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5963 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5964 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5965 {
5966 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5967 return VINF_SUCCESS;
5968 }
5969 return VERR_NOT_FOUND;
5970}
5971
5972
5973/**
5974 * Updates the FPU exception status after FCW is changed.
5975 *
5976 * @param pFpuCtx The FPU context.
5977 */
5978IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
5979{
5980 uint16_t u16Fsw = pFpuCtx->FSW;
5981 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
5982 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5983 else
5984 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5985 pFpuCtx->FSW = u16Fsw;
5986}
5987
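/*
 * Illustrative sketch (added for exposition): ES (FSW bit 7) and B (bit 15)
 * summarize whether any pending exception is unmasked, i.e. whether a status
 * bit is set whose matching FCW mask bit is clear.  This simplified sketch only
 * looks at the six maskable exception bits (FSW/FCW bits 0-5); the constants
 * and the example function name are hypothetical.
 */
#if 0 /* standalone C99 sketch, never built */
# include <assert.h>
# include <stdint.h>

static uint16_t exampleRecalcEs(uint16_t uFsw, uint16_t uFcw)
{
    if (uFsw & 0x3f & ~(uint16_t)(uFcw & 0x3f))
        return (uint16_t)(uFsw |  UINT16_C(0x8080));    /* set B and ES   */
    return     (uint16_t)(uFsw & ~UINT16_C(0x8080));    /* clear B and ES */
}

static void exampleEs(void)
{
    assert(  exampleRecalcEs(0x0001 /*IE*/, 0x003e /*IM clear*/)   & 0x0080);   /* unmasked #I -> ES set  */
    assert(!(exampleRecalcEs(0x0001 /*IE*/, 0x003f /*all masked*/) & 0x0080));  /* masked -> ES cleared   */
}
#endif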
5988
5989/**
5990 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
5991 *
5992 * @returns The full FTW.
5993 * @param pFpuCtx The FPU context.
5994 */
5995IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
5996{
5997 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
5998 uint16_t u16Ftw = 0;
5999 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6000 for (unsigned iSt = 0; iSt < 8; iSt++)
6001 {
6002 unsigned const iReg = (iSt + iTop) & 7;
6003 if (!(u8Ftw & RT_BIT(iReg)))
6004 u16Ftw |= 3 << (iReg * 2); /* empty */
6005 else
6006 {
6007 uint16_t uTag;
6008 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6009 if (pr80Reg->s.uExponent == 0x7fff)
6010 uTag = 2; /* Exponent is all 1's => Special. */
6011 else if (pr80Reg->s.uExponent == 0x0000)
6012 {
6013 if (pr80Reg->s.u64Mantissa == 0x0000)
6014 uTag = 1; /* All bits are zero => Zero. */
6015 else
6016 uTag = 2; /* Must be special. */
6017 }
6018 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6019 uTag = 0; /* Valid. */
6020 else
6021 uTag = 2; /* Must be special. */
6022
6023 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
6024 }
6025 }
6026
6027 return u16Ftw;
6028}
6029
6030
6031/**
6032 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6033 *
6034 * @returns The compressed FTW.
6035 * @param u16FullFtw The full FTW to convert.
6036 */
6037IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6038{
6039 uint8_t u8Ftw = 0;
6040 for (unsigned i = 0; i < 8; i++)
6041 {
6042 if ((u16FullFtw & 3) != 3 /*empty*/)
6043 u8Ftw |= RT_BIT(i);
6044 u16FullFtw >>= 2;
6045 }
6046
6047 return u8Ftw;
6048}
6049
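/*
 * Illustrative sketch (added for exposition): the compression rule above marks
 * a register as in use whenever its 2-bit tag in the full FTW is anything but
 * 11b (empty), discarding the valid/zero/special distinction that
 * iemFpuCalcFullFtw reconstructs from the register contents.  The example names
 * below are hypothetical.
 */
#if 0 /* standalone C99 sketch, never built */
# include <assert.h>
# include <stdint.h>

static uint8_t exampleCompressFtw(uint16_t u16FullFtw)
{
    uint8_t u8Ftw = 0;
    for (unsigned i = 0; i < 8; i++, u16FullFtw >>= 2)
        if ((u16FullFtw & 3) != 3 /*empty*/)
            u8Ftw |= (uint8_t)(1 << i);
    return u8Ftw;
}

static void exampleFtwCompress(void)
{
    /* Reg 0 valid (00b), reg 1 zero (01b), reg 2 special (10b), regs 3-7 empty (11b). */
    uint16_t const uFull = (uint16_t)(0x0 | (0x1 << 2) | (0x2 << 4) | (0xffff << 6));
    assert(exampleCompressFtw(uFull) == 0x07);
}
#endif
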
6050/** @} */
6051
6052
6053/** @name Memory access.
6054 *
6055 * @{
6056 */
6057
6058
6059/**
6060 * Updates the IEMCPU::cbWritten counter if applicable.
6061 *
6062 * @param pIemCpu The IEM per CPU data.
6063 * @param fAccess The access being accounted for.
6064 * @param cbMem The access size.
6065 */
6066DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6067{
6068 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6069 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6070 pIemCpu->cbWritten += (uint32_t)cbMem;
6071}
6072
6073
6074/**
6075 * Checks if the given segment can be written to, raising the appropriate
6076 * exception if not.
6077 *
6078 * @returns VBox strict status code.
6079 *
6080 * @param pIemCpu The IEM per CPU data.
6081 * @param pHid Pointer to the hidden register.
6082 * @param iSegReg The register number.
6083 * @param pu64BaseAddr Where to return the base address to use for the
6084 * segment. (In 64-bit code it may differ from the
6085 * base in the hidden segment.)
6086 */
6087IEM_STATIC VBOXSTRICTRC
6088iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6089{
6090 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6091 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6092 else
6093 {
6094 if (!pHid->Attr.n.u1Present)
6095 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6096
6097 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6098 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6099 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6100 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6101 *pu64BaseAddr = pHid->u64Base;
6102 }
6103 return VINF_SUCCESS;
6104}
6105
6106
6107/**
6108 * Checks if the given segment can be read from, raising the appropriate
6109 * exception if not.
6110 *
6111 * @returns VBox strict status code.
6112 *
6113 * @param pIemCpu The IEM per CPU data.
6114 * @param pHid Pointer to the hidden register.
6115 * @param iSegReg The register number.
6116 * @param pu64BaseAddr Where to return the base address to use for the
6117 * segment. (In 64-bit code it may differ from the
6118 * base in the hidden segment.)
6119 */
6120IEM_STATIC VBOXSTRICTRC
6121iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6122{
6123 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6124 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6125 else
6126 {
6127 if (!pHid->Attr.n.u1Present)
6128 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6129
6130 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6131 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6132 *pu64BaseAddr = pHid->u64Base;
6133 }
6134 return VINF_SUCCESS;
6135}
6136
6137
6138/**
6139 * Applies the segment limit, base and attributes.
6140 *
6141 * This may raise a \#GP or \#SS.
6142 *
6143 * @returns VBox strict status code.
6144 *
6145 * @param pIemCpu The IEM per CPU data.
6146 * @param fAccess The kind of access which is being performed.
6147 * @param iSegReg The index of the segment register to apply.
6148 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6149 * TSS, ++).
6150 * @param pGCPtrMem Pointer to the guest memory address to apply
6151 * segmentation to. Input and output parameter.
6152 */
6153IEM_STATIC VBOXSTRICTRC
6154iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6155{
6156 if (iSegReg == UINT8_MAX)
6157 return VINF_SUCCESS;
6158
6159 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6160 switch (pIemCpu->enmCpuMode)
6161 {
6162 case IEMMODE_16BIT:
6163 case IEMMODE_32BIT:
6164 {
6165 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6166 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6167
6168 Assert(pSel->Attr.n.u1Present);
6169 Assert(pSel->Attr.n.u1DescType);
6170 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6171 {
6172 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6173 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6174 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6175
6176 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6177 {
6178 /** @todo CPL check. */
6179 }
6180
6181 /*
6182 * There are two kinds of data selectors, normal and expand down.
6183 */
6184 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6185 {
6186 if ( GCPtrFirst32 > pSel->u32Limit
6187 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6188 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6189 }
6190 else
6191 {
6192 /*
6193 * The upper boundary is defined by the B bit, not the G bit!
6194 */
6195 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6196 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6197 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6198 }
6199 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6200 }
6201 else
6202 {
6203
6204 /*
6205 * Code selectors can usually be used to read through; writing is
6206 * only permitted in real and V8086 mode.
6207 */
6208 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6209 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6210 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6211 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6212 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6213
6214 if ( GCPtrFirst32 > pSel->u32Limit
6215 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6216 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6217
6218 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6219 {
6220 /** @todo CPL check. */
6221 }
6222
6223 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6224 }
6225 return VINF_SUCCESS;
6226 }
6227
6228 case IEMMODE_64BIT:
6229 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6230 *pGCPtrMem += pSel->u64Base;
6231 return VINF_SUCCESS;
6232
6233 default:
6234 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
6235 }
6236}
6237
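/*
 * Illustrative sketch (added for exposition): for expand-down data segments the
 * check above is inverted - valid offsets lie strictly above the limit and at
 * or below the segment's upper bound, which is 0xffffffff when the B bit is set
 * and 0xffff otherwise.  The example function name is hypothetical, and the
 * sketch omits the base addition and access-right checks done by the real code.
 */
#if 0 /* standalone C99 sketch, never built */
# include <assert.h>
# include <stdbool.h>
# include <stdint.h>

static bool exampleExpandDownInBounds(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fBigSeg)
{
    uint32_t const uUpper = fBigSeg ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= uUpper;
}

static void exampleExpandDown(void)
{
    assert( exampleExpandDownInBounds(0x2000, 0x2003,  0x1fff, false));   /* just above the limit    */
    assert(!exampleExpandDownInBounds(0x1fff, 0x2002,  0x1fff, false));   /* starts inside the hole  */
    assert(!exampleExpandDownInBounds(0x2000, 0x10000, 0x1fff, false));   /* beyond 64K with B clear */
}
#endif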
6238
6239/**
6240 * Translates a virtual address to a physical address and checks if we
6241 * can access the page as specified.
6242 *
6243 * @param pIemCpu The IEM per CPU data.
6244 * @param GCPtrMem The virtual address.
6245 * @param fAccess The intended access.
6246 * @param pGCPhysMem Where to return the physical address.
6247 */
6248IEM_STATIC VBOXSTRICTRC
6249iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6250{
6251 /** @todo Need a different PGM interface here. We're currently using
6252 * generic / REM interfaces. this won't cut it for R0 & RC. */
6253 RTGCPHYS GCPhys;
6254 uint64_t fFlags;
6255 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6256 if (RT_FAILURE(rc))
6257 {
6258 /** @todo Check unassigned memory in unpaged mode. */
6259 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6260 *pGCPhysMem = NIL_RTGCPHYS;
6261 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6262 }
6263
6264 /* If the page is writable, user accessible and does not have the no-exec bit set, all
6265 access is allowed. Otherwise we'll have to check more carefully... */
6266 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6267 {
6268 /* Write to read only memory? */
6269 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6270 && !(fFlags & X86_PTE_RW)
6271 && ( pIemCpu->uCpl != 0
6272 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6273 {
6274 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6275 *pGCPhysMem = NIL_RTGCPHYS;
6276 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6277 }
6278
6279 /* Kernel memory accessed by userland? */
6280 if ( !(fFlags & X86_PTE_US)
6281 && pIemCpu->uCpl == 3
6282 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6283 {
6284 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6285 *pGCPhysMem = NIL_RTGCPHYS;
6286 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6287 }
6288
6289 /* Executing non-executable memory? */
6290 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6291 && (fFlags & X86_PTE_PAE_NX)
6292 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6293 {
6294 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6295 *pGCPhysMem = NIL_RTGCPHYS;
6296 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6297 VERR_ACCESS_DENIED);
6298 }
6299 }
6300
6301 /*
6302 * Set the dirty / access flags.
6303 * ASSUMES this is set when the address is translated rather than on commit...
6304 */
6305 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6306 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6307 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6308 {
6309 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6310 AssertRC(rc2);
6311 }
6312
6313 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6314 *pGCPhysMem = GCPhys;
6315 return VINF_SUCCESS;
6316}
6317
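/*
 * Illustrative sketch (added for exposition): once the PTE flags are in hand,
 * the slow path above rejects three cases - writes to read-only pages (unless
 * CPL is 0 and CR0.WP is clear), ring-3 access to supervisor pages, and
 * instruction fetches from no-execute pages when EFER.NXE is on.  This sketch
 * uses hypothetical EX_PTE_* constants and ignores the IEM_ACCESS_WHAT_SYS
 * exemption the real code applies to the user/supervisor check.
 */
#if 0 /* standalone C99 sketch, never built */
# include <assert.h>
# include <stdbool.h>
# include <stdint.h>

# define EX_PTE_RW  UINT64_C(0x02)          /* writable        */
# define EX_PTE_US  UINT64_C(0x04)          /* user accessible */
# define EX_PTE_NX  (UINT64_C(1) << 63)     /* no execute      */

static bool examplePageAccessOk(uint64_t fPte, bool fWrite, bool fExec,
                                unsigned uCpl, bool fCr0Wp, bool fEferNxe)
{
    if (fWrite && !(fPte & EX_PTE_RW) && (uCpl != 0 || fCr0Wp))
        return false;                                   /* write to a read-only page    */
    if (uCpl == 3 && !(fPte & EX_PTE_US))
        return false;                                   /* user access to a kernel page */
    if (fExec && (fPte & EX_PTE_NX) && fEferNxe)
        return false;                                   /* execute from a no-exec page  */
    return true;
}

static void examplePaging(void)
{
    assert(!examplePageAccessOk(EX_PTE_US, true,  false, 3, true,  true));   /* RO write     */
    assert( examplePageAccessOk(EX_PTE_US, true,  false, 0, false, true));   /* CPL0, WP off */
    assert(!examplePageAccessOk(EX_PTE_RW, false, false, 3, true,  true));   /* kernel page  */
    assert(!examplePageAccessOk(EX_PTE_RW | EX_PTE_US | EX_PTE_NX, false, true, 3, true, true)); /* NX */
}
#endif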
6318
6319
6320/**
6321 * Maps a physical page.
6322 *
6323 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6324 * @param pIemCpu The IEM per CPU data.
6325 * @param GCPhysMem The physical address.
6326 * @param fAccess The intended access.
6327 * @param ppvMem Where to return the mapping address.
6328 * @param pLock The PGM lock.
6329 */
6330IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6331{
6332#ifdef IEM_VERIFICATION_MODE_FULL
6333 /* Force the alternative path so we can ignore writes. */
6334 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6335 {
6336 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6337 {
6338 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6339 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6340 if (RT_FAILURE(rc2))
6341 pIemCpu->fProblematicMemory = true;
6342 }
6343 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6344 }
6345#endif
6346#ifdef IEM_LOG_MEMORY_WRITES
6347 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6348 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6349#endif
6350#ifdef IEM_VERIFICATION_MODE_MINIMAL
6351 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6352#endif
6353
6354 /** @todo This API may require some improving later. A private deal with PGM
6355 * regarding locking and unlocking needs to be struck. A couple of TLBs
6356 * living in PGM, but with publicly accessible inlined access methods
6357 * could perhaps be an even better solution. */
6358 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6359 GCPhysMem,
6360 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6361 pIemCpu->fBypassHandlers,
6362 ppvMem,
6363 pLock);
6364 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6365 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6366
6367#ifdef IEM_VERIFICATION_MODE_FULL
6368 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6369 pIemCpu->fProblematicMemory = true;
6370#endif
6371 return rc;
6372}
6373
6374
6375/**
6376 * Unmap a page previously mapped by iemMemPageMap.
6377 *
6378 * @param pIemCpu The IEM per CPU data.
6379 * @param GCPhysMem The physical address.
6380 * @param fAccess The intended access.
6381 * @param pvMem What iemMemPageMap returned.
6382 * @param pLock The PGM lock.
6383 */
6384DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6385{
6386 NOREF(pIemCpu);
6387 NOREF(GCPhysMem);
6388 NOREF(fAccess);
6389 NOREF(pvMem);
6390 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6391}
6392
6393
6394/**
6395 * Looks up a memory mapping entry.
6396 *
6397 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6398 * @param pIemCpu The IEM per CPU data.
6399 * @param pvMem The memory address.
6400 * @param fAccess The access to look up.
6401 */
6402DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6403{
6404 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6405 if ( pIemCpu->aMemMappings[0].pv == pvMem
6406 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6407 return 0;
6408 if ( pIemCpu->aMemMappings[1].pv == pvMem
6409 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6410 return 1;
6411 if ( pIemCpu->aMemMappings[2].pv == pvMem
6412 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6413 return 2;
6414 return VERR_NOT_FOUND;
6415}
6416
6417
6418/**
6419 * Finds a free memmap entry when using iNextMapping doesn't work.
6420 *
6421 * @returns Memory mapping index, 1024 on failure.
6422 * @param pIemCpu The IEM per CPU data.
6423 */
6424IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6425{
6426 /*
6427 * The easy case.
6428 */
6429 if (pIemCpu->cActiveMappings == 0)
6430 {
6431 pIemCpu->iNextMapping = 1;
6432 return 0;
6433 }
6434
6435 /* There should be enough mappings for all instructions. */
6436 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6437
6438 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6439 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6440 return i;
6441
6442 AssertFailedReturn(1024);
6443}
6444
6445
6446/**
6447 * Commits a bounce buffer that needs writing back and unmaps it.
6448 *
6449 * @returns Strict VBox status code.
6450 * @param pIemCpu The IEM per CPU data.
6451 * @param iMemMap The index of the buffer to commit.
6452 */
6453IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6454{
6455 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6456 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6457
6458 /*
6459 * Do the writing.
6460 */
6461#ifndef IEM_VERIFICATION_MODE_MINIMAL
6462 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6463 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6464 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6465 {
6466 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6467 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6468 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6469 if (!pIemCpu->fBypassHandlers)
6470 {
6471 /*
6472 * Carefully and efficiently dealing with access handler return
6473 * codes makes this a little bloated.
6474 */
6475 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6476 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6477 pbBuf,
6478 cbFirst,
6479 PGMACCESSORIGIN_IEM);
6480 if (rcStrict == VINF_SUCCESS)
6481 {
6482 if (cbSecond)
6483 {
6484 rcStrict = PGMPhysWrite(pVM,
6485 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6486 pbBuf + cbFirst,
6487 cbSecond,
6488 PGMACCESSORIGIN_IEM);
6489 if (rcStrict == VINF_SUCCESS)
6490 { /* nothing */ }
6491 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6492 {
6493 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6494 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6495 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6496 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6497 }
6498 else
6499 {
6500 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6501 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6502 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6503 return rcStrict;
6504 }
6505 }
6506 }
6507 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6508 {
6509 if (!cbSecond)
6510 {
6511 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6512 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6513 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6514 }
6515 else
6516 {
6517 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6518 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6519 pbBuf + cbFirst,
6520 cbSecond,
6521 PGMACCESSORIGIN_IEM);
6522 if (rcStrict2 == VINF_SUCCESS)
6523 {
6524 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6525 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6526 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6527 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6528 }
6529 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6530 {
6531 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6532 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6533 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6534 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6535 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6536 }
6537 else
6538 {
6539 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6540 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6541 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6542 return rcStrict2;
6543 }
6544 }
6545 }
6546 else
6547 {
6548 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6549 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6550 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6551 return rcStrict;
6552 }
6553 }
6554 else
6555 {
6556 /*
6557 * No access handlers, much simpler.
6558 */
6559 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6560 if (RT_SUCCESS(rc))
6561 {
6562 if (cbSecond)
6563 {
6564 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6565 if (RT_SUCCESS(rc))
6566 { /* likely */ }
6567 else
6568 {
6569 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6570 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6571 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6572 return rc;
6573 }
6574 }
6575 }
6576 else
6577 {
6578 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6579 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6580 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6581 return rc;
6582 }
6583 }
6584 }
6585#endif
6586
6587#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6588 /*
6589 * Record the write(s).
6590 */
6591 if (!pIemCpu->fNoRem)
6592 {
6593 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6594 if (pEvtRec)
6595 {
6596 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6597 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6598 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6599 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6600 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6601 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6602 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6603 }
6604 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6605 {
6606 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6607 if (pEvtRec)
6608 {
6609 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6610 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6611 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6612 memcpy(pEvtRec->u.RamWrite.ab,
6613 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6614 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6615 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6616 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6617 }
6618 }
6619 }
6620#endif
6621#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6622 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6623 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6624 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6625 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6626 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6627 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6628
6629 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6630 g_cbIemWrote = cbWrote;
6631 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6632#endif
6633
6634 /*
6635 * Free the mapping entry.
6636 */
6637 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6638 Assert(pIemCpu->cActiveMappings != 0);
6639 pIemCpu->cActiveMappings--;
6640 return VINF_SUCCESS;
6641}
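
/*
 * Note on the commit path above, in outline: the bounce buffer is flushed
 * with up to two PGMPhysWrite / PGMPhysSimpleWriteGCPhys calls (one per
 * page), informational PGM status codes are folded together with
 * PGM_PHYS_RW_DO_UPDATE_STRICT_RC and queued via iemSetPassUpStatus, the
 * write is optionally recorded or logged for verification, and only then
 * is the mapping entry freed (fAccess = IEM_ACCESS_INVALID).
 */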
6642
6643
6644/**
6645 * iemMemMap worker that deals with a request crossing pages.
6646 */
6647IEM_STATIC VBOXSTRICTRC
6648iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6649{
6650 /*
6651 * Do the address translations.
6652 */
6653 RTGCPHYS GCPhysFirst;
6654 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6655 if (rcStrict != VINF_SUCCESS)
6656 return rcStrict;
6657
6658/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6659 * last byte. */
6660 RTGCPHYS GCPhysSecond;
6661 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6662 if (rcStrict != VINF_SUCCESS)
6663 return rcStrict;
6664 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6665
6666 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6667#ifdef IEM_VERIFICATION_MODE_FULL
6668 /*
6669 * Detect problematic memory when verifying so we can select
6670 * the right execution engine. (TLB: Redo this.)
6671 */
6672 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6673 {
6674 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6675 if (RT_SUCCESS(rc2))
6676 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6677 if (RT_FAILURE(rc2))
6678 pIemCpu->fProblematicMemory = true;
6679 }
6680#endif
6681
6682
6683 /*
6684 * Read in the current memory content if it's a read, execute or partial
6685 * write access.
6686 */
6687 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6688 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6689 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6690
6691 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6692 {
6693 if (!pIemCpu->fBypassHandlers)
6694 {
6695 /*
6696 * Must carefully deal with access handler status codes here,
6697 * which makes the code a bit bloated.
6698 */
6699 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6700 if (rcStrict == VINF_SUCCESS)
6701 {
6702 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6703 if (rcStrict == VINF_SUCCESS)
6704 { /*likely */ }
6705 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6706 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6707 else
6708 {
6709 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6710 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6711 return rcStrict;
6712 }
6713 }
6714 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6715 {
6716 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6717 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6718 {
6719 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6720 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6721 }
6722 else
6723 {
6724 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6725 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6726 return rcStrict2;
6727 }
6728 }
6729 else
6730 {
6731 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6732 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6733 return rcStrict;
6734 }
6735 }
6736 else
6737 {
6738 /*
6739 * No informational status codes here, much more straightforward.
6740 */
6741 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6742 if (RT_SUCCESS(rc))
6743 {
6744 Assert(rc == VINF_SUCCESS);
6745 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6746 if (RT_SUCCESS(rc))
6747 Assert(rc == VINF_SUCCESS);
6748 else
6749 {
6750 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6751 return rc;
6752 }
6753 }
6754 else
6755 {
6756 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6757 return rc;
6758 }
6759 }
6760
6761#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6762 if ( !pIemCpu->fNoRem
6763 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6764 {
6765 /*
6766 * Record the reads.
6767 */
6768 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6769 if (pEvtRec)
6770 {
6771 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6772 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6773 pEvtRec->u.RamRead.cb = cbFirstPage;
6774 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6775 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6776 }
6777 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6778 if (pEvtRec)
6779 {
6780 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6781 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6782 pEvtRec->u.RamRead.cb = cbSecondPage;
6783 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6784 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6785 }
6786 }
6787#endif
6788 }
6789#ifdef VBOX_STRICT
6790 else
6791 memset(pbBuf, 0xcc, cbMem);
6792 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6793 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6794#endif
6795
6796 /*
6797 * Commit the bounce buffer entry.
6798 */
6799 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6800 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6801 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6802 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6803 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6804 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6805 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6806 pIemCpu->iNextMapping = iMemMap + 1;
6807 pIemCpu->cActiveMappings++;
6808
6809 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6810 *ppvMem = pbBuf;
6811 return VINF_SUCCESS;
6812}
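
/*
 * Worked example of the split above, assuming 4 KiB pages: a 4 byte access
 * whose first byte sits at page offset 0xffe gives
 *     cbFirstPage  = PAGE_SIZE - 0xffe   = 2
 *     cbSecondPage = cbMem - cbFirstPage = 2
 * so bytes 0..1 are read from / written to the end of the first page and
 * bytes 2..3 to the start of the second page, all via the bounce buffer.
 */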
6813
6814
6815/**
6816 * iemMemMap worker that deals with iemMemPageMap failures.
6817 */
6818IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6819 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6820{
6821 /*
6822 * Filter out conditions we can handle and the ones which shouldn't happen.
6823 */
6824 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6825 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6826 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6827 {
6828 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
6829 return rcMap;
6830 }
6831 pIemCpu->cPotentialExits++;
6832
6833 /*
6834 * Read in the current memory content if it's a read, execute or partial
6835 * write access.
6836 */
6837 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6838 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6839 {
6840 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6841 memset(pbBuf, 0xff, cbMem);
6842 else
6843 {
6844 int rc;
6845 if (!pIemCpu->fBypassHandlers)
6846 {
6847 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6848 if (rcStrict == VINF_SUCCESS)
6849 { /* nothing */ }
6850 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6851 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6852 else
6853 {
6854 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6855 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6856 return rcStrict;
6857 }
6858 }
6859 else
6860 {
6861 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6862 if (RT_SUCCESS(rc))
6863 { /* likely */ }
6864 else
6865 {
6866 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6867 GCPhysFirst, rc));
6868 return rc;
6869 }
6870 }
6871 }
6872
6873#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6874 if ( !pIemCpu->fNoRem
6875 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6876 {
6877 /*
6878 * Record the read.
6879 */
6880 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6881 if (pEvtRec)
6882 {
6883 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6884 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6885 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6886 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6887 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6888 }
6889 }
6890#endif
6891 }
6892#ifdef VBOX_STRICT
6893 else
6894 memset(pbBuf, 0xcc, cbMem);
6897 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6898 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6899#endif
6900
6901 /*
6902 * Commit the bounce buffer entry.
6903 */
6904 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6905 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6906 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6907 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6908 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6909 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6910 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6911 pIemCpu->iNextMapping = iMemMap + 1;
6912 pIemCpu->cActiveMappings++;
6913
6914 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6915 *ppvMem = pbBuf;
6916 return VINF_SUCCESS;
6917}
6918
6919
6920
6921/**
6922 * Maps the specified guest memory for the given kind of access.
6923 *
6924 * This may use bounce buffering of the memory if it's crossing a page
6925 * boundary or if there is an access handler installed for any of it. Because
6926 * of lock prefix guarantees, we're in for some extra clutter when this
6927 * happens.
6928 *
6929 * This may raise a \#GP, \#SS, \#PF or \#AC.
6930 *
6931 * @returns VBox strict status code.
6932 *
6933 * @param pIemCpu The IEM per CPU data.
6934 * @param ppvMem Where to return the pointer to the mapped
6935 * memory.
6936 * @param cbMem The number of bytes to map. This is usually 1,
6937 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6938 * string operations it can be up to a page.
6939 * @param iSegReg The index of the segment register to use for
6940 * this access. The base and limits are checked.
6941 * Use UINT8_MAX to indicate that no segmentation
6942 * is required (for IDT, GDT and LDT accesses).
6943 * @param GCPtrMem The address of the guest memory.
6944 * @param fAccess How the memory is being accessed. The
6945 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6946 * how to map the memory, while the
6947 * IEM_ACCESS_WHAT_XXX bit is used when raising
6948 * exceptions.
6949 */
6950IEM_STATIC VBOXSTRICTRC
6951iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6952{
6953 /*
6954 * Check the input and figure out which mapping entry to use.
6955 */
6956 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6957 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_PARTIAL_WRITE)));
6958
6959 unsigned iMemMap = pIemCpu->iNextMapping;
6960 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6961 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6962 {
6963 iMemMap = iemMemMapFindFree(pIemCpu);
6964 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
6965 }
6966
6967 /*
6968 * Map the memory, checking that we can actually access it. If something
6969 * slightly complicated happens, fall back on bounce buffering.
6970 */
6971 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6972 if (rcStrict != VINF_SUCCESS)
6973 return rcStrict;
6974
6975 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6976 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6977
6978 RTGCPHYS GCPhysFirst;
6979 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6980 if (rcStrict != VINF_SUCCESS)
6981 return rcStrict;
6982
6983 void *pvMem;
6984 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6985 if (rcStrict != VINF_SUCCESS)
6986 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6987
6988 /*
6989 * Fill in the mapping table entry.
6990 */
6991 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
6992 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
6993 pIemCpu->iNextMapping = iMemMap + 1;
6994 pIemCpu->cActiveMappings++;
6995
6996 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6997 *ppvMem = pvMem;
6998 return VINF_SUCCESS;
6999}
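
/*
 * Illustrative sketch (not additional API) of the iemMemMap /
 * iemMemCommitAndUnmap pairing, mirroring what the data store helpers
 * below do; GCPtrMem and u32Value stand in for the caller's address and
 * value, DS is picked arbitrarily as the segment, and error handling is
 * reduced to the VINF_SUCCESS check:
 *
 *     uint32_t    *pu32Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32Dst = u32Value;
 *         rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *     }
 *     return rcStrict;
 */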
7000
7001
7002/**
7003 * Commits the guest memory if bounce buffered and unmaps it.
7004 *
7005 * @returns Strict VBox status code.
7006 * @param pIemCpu The IEM per CPU data.
7007 * @param pvMem The mapping.
7008 * @param fAccess The kind of access.
7009 */
7010IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7011{
7012 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7013 AssertReturn(iMemMap >= 0, iMemMap);
7014
7015 /* If it's bounce buffered, we may need to write back the buffer. */
7016 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7017 {
7018 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7019 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7020 }
7021 /* Otherwise unlock it. */
7022 else
7023 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7024
7025 /* Free the entry. */
7026 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7027 Assert(pIemCpu->cActiveMappings != 0);
7028 pIemCpu->cActiveMappings--;
7029 return VINF_SUCCESS;
7030}
7031
7032
7033/**
7034 * Rolls back mappings, releasing page locks and such.
7035 *
7036 * The caller shall only call this after checking cActiveMappings.
7037 *
7039 * @param pIemCpu The IEM per CPU data.
7040 */
7041IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7042{
7043 Assert(pIemCpu->cActiveMappings > 0);
7044
7045 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7046 while (iMemMap-- > 0)
7047 {
7048 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7049 if (fAccess != IEM_ACCESS_INVALID)
7050 {
7051 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7052 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7053 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7054 Assert(pIemCpu->cActiveMappings > 0);
7055 pIemCpu->cActiveMappings--;
7056 }
7057 }
7058}
7059
7060
7061/**
7062 * Fetches a data byte.
7063 *
7064 * @returns Strict VBox status code.
7065 * @param pIemCpu The IEM per CPU data.
7066 * @param pu8Dst Where to return the byte.
7067 * @param iSegReg The index of the segment register to use for
7068 * this access. The base and limits are checked.
7069 * @param GCPtrMem The address of the guest memory.
7070 */
7071IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7072{
7073 /* The lazy approach for now... */
7074 uint8_t const *pu8Src;
7075 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7076 if (rc == VINF_SUCCESS)
7077 {
7078 *pu8Dst = *pu8Src;
7079 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7080 }
7081 return rc;
7082}
7083
7084
7085/**
7086 * Fetches a data word.
7087 *
7088 * @returns Strict VBox status code.
7089 * @param pIemCpu The IEM per CPU data.
7090 * @param pu16Dst Where to return the word.
7091 * @param iSegReg The index of the segment register to use for
7092 * this access. The base and limits are checked.
7093 * @param GCPtrMem The address of the guest memory.
7094 */
7095IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7096{
7097 /* The lazy approach for now... */
7098 uint16_t const *pu16Src;
7099 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7100 if (rc == VINF_SUCCESS)
7101 {
7102 *pu16Dst = *pu16Src;
7103 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7104 }
7105 return rc;
7106}
7107
7108
7109/**
7110 * Fetches a data dword.
7111 *
7112 * @returns Strict VBox status code.
7113 * @param pIemCpu The IEM per CPU data.
7114 * @param pu32Dst Where to return the dword.
7115 * @param iSegReg The index of the segment register to use for
7116 * this access. The base and limits are checked.
7117 * @param GCPtrMem The address of the guest memory.
7118 */
7119IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7120{
7121 /* The lazy approach for now... */
7122 uint32_t const *pu32Src;
7123 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7124 if (rc == VINF_SUCCESS)
7125 {
7126 *pu32Dst = *pu32Src;
7127 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7128 }
7129 return rc;
7130}
7131
7132
7133#ifdef SOME_UNUSED_FUNCTION
7134/**
7135 * Fetches a data dword and sign extends it to a qword.
7136 *
7137 * @returns Strict VBox status code.
7138 * @param pIemCpu The IEM per CPU data.
7139 * @param pu64Dst Where to return the sign extended value.
7140 * @param iSegReg The index of the segment register to use for
7141 * this access. The base and limits are checked.
7142 * @param GCPtrMem The address of the guest memory.
7143 */
7144IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7145{
7146 /* The lazy approach for now... */
7147 int32_t const *pi32Src;
7148 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7149 if (rc == VINF_SUCCESS)
7150 {
7151 *pu64Dst = *pi32Src;
7152 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7153 }
7154#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7155 else
7156 *pu64Dst = 0;
7157#endif
7158 return rc;
7159}
7160#endif
7161
7162
7163/**
7164 * Fetches a data qword.
7165 *
7166 * @returns Strict VBox status code.
7167 * @param pIemCpu The IEM per CPU data.
7168 * @param pu64Dst Where to return the qword.
7169 * @param iSegReg The index of the segment register to use for
7170 * this access. The base and limits are checked.
7171 * @param GCPtrMem The address of the guest memory.
7172 */
7173IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7174{
7175 /* The lazy approach for now... */
7176 uint64_t const *pu64Src;
7177 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7178 if (rc == VINF_SUCCESS)
7179 {
7180 *pu64Dst = *pu64Src;
7181 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7182 }
7183 return rc;
7184}
7185
7186
7187/**
7188 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7189 *
7190 * @returns Strict VBox status code.
7191 * @param pIemCpu The IEM per CPU data.
7192 * @param pu64Dst Where to return the qword.
7193 * @param iSegReg The index of the segment register to use for
7194 * this access. The base and limits are checked.
7195 * @param GCPtrMem The address of the guest memory.
7196 */
7197IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7198{
7199 /* The lazy approach for now... */
7200 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7201 if (RT_UNLIKELY(GCPtrMem & 15))
7202 return iemRaiseGeneralProtectionFault0(pIemCpu);
7203
7204 uint64_t const *pu64Src;
7205 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7206 if (rc == VINF_SUCCESS)
7207 {
7208 *pu64Dst = *pu64Src;
7209 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7210 }
7211 return rc;
7212}
7213
7214
7215/**
7216 * Fetches a data tword.
7217 *
7218 * @returns Strict VBox status code.
7219 * @param pIemCpu The IEM per CPU data.
7220 * @param pr80Dst Where to return the tword.
7221 * @param iSegReg The index of the segment register to use for
7222 * this access. The base and limits are checked.
7223 * @param GCPtrMem The address of the guest memory.
7224 */
7225IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7226{
7227 /* The lazy approach for now... */
7228 PCRTFLOAT80U pr80Src;
7229 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7230 if (rc == VINF_SUCCESS)
7231 {
7232 *pr80Dst = *pr80Src;
7233 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7234 }
7235 return rc;
7236}
7237
7238
7239/**
7240 * Fetches a data dqword (double qword), generally SSE related.
7241 *
7242 * @returns Strict VBox status code.
7243 * @param pIemCpu The IEM per CPU data.
7244 * @param pu128Dst Where to return the dqword.
7245 * @param iSegReg The index of the segment register to use for
7246 * this access. The base and limits are checked.
7247 * @param GCPtrMem The address of the guest memory.
7248 */
7249IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7250{
7251 /* The lazy approach for now... */
7252 uint128_t const *pu128Src;
7253 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7254 if (rc == VINF_SUCCESS)
7255 {
7256 *pu128Dst = *pu128Src;
7257 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7258 }
7259 return rc;
7260}
7261
7262
7263/**
7264 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7265 * related.
7266 *
7267 * Raises \#GP(0) if not aligned.
7268 *
7269 * @returns Strict VBox status code.
7270 * @param pIemCpu The IEM per CPU data.
7271 * @param pu128Dst Where to return the dqword.
7272 * @param iSegReg The index of the segment register to use for
7273 * this access. The base and limits are checked.
7274 * @param GCPtrMem The address of the guest memory.
7275 */
7276IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7277{
7278 /* The lazy approach for now... */
7279 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7280 if ( (GCPtrMem & 15)
7281 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7282 return iemRaiseGeneralProtectionFault0(pIemCpu);
7283
7284 uint128_t const *pu128Src;
7285 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7286 if (rc == VINF_SUCCESS)
7287 {
7288 *pu128Dst = *pu128Src;
7289 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7290 }
7291 return rc;
7292}
7293
7294
7295
7296
7297/**
7298 * Fetches a descriptor register (lgdt, lidt).
7299 *
7300 * @returns Strict VBox status code.
7301 * @param pIemCpu The IEM per CPU data.
7302 * @param pcbLimit Where to return the limit.
7303 * @param pGCPtrBase Where to return the base.
7304 * @param iSegReg The index of the segment register to use for
7305 * this access. The base and limits are checked.
7306 * @param GCPtrMem The address of the guest memory.
7307 * @param enmOpSize The effective operand size.
7308 */
7309IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7310 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7311{
7312 uint8_t const *pu8Src;
7313 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7314 (void **)&pu8Src,
7315 enmOpSize == IEMMODE_64BIT
7316 ? 2 + 8
7317 : enmOpSize == IEMMODE_32BIT
7318 ? 2 + 4
7319 : 2 + 3,
7320 iSegReg,
7321 GCPtrMem,
7322 IEM_ACCESS_DATA_R);
7323 if (rcStrict == VINF_SUCCESS)
7324 {
7325 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7326 switch (enmOpSize)
7327 {
7328 case IEMMODE_16BIT:
7329 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7330 break;
7331 case IEMMODE_32BIT:
7332 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7333 break;
7334 case IEMMODE_64BIT:
7335 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7336 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7337 break;
7338
7339 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7340 }
7341 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7342 }
7343 return rcStrict;
7344}
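
/*
 * Layout note for the descriptor-table pseudo operand handled above (and
 * written by iemMemStoreDataXdtr further down): bytes 0..1 hold the 16-bit
 * limit and the base follows at offset 2, taking 3 bytes for 16-bit operand
 * size (the 4th base byte is treated as zero), 4 bytes for 32-bit and
 * 8 bytes for 64-bit.
 */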
7345
7346
7347
7348/**
7349 * Stores a data byte.
7350 *
7351 * @returns Strict VBox status code.
7352 * @param pIemCpu The IEM per CPU data.
7353 * @param iSegReg The index of the segment register to use for
7354 * this access. The base and limits are checked.
7355 * @param GCPtrMem The address of the guest memory.
7356 * @param u8Value The value to store.
7357 */
7358IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7359{
7360 /* The lazy approach for now... */
7361 uint8_t *pu8Dst;
7362 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7363 if (rc == VINF_SUCCESS)
7364 {
7365 *pu8Dst = u8Value;
7366 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7367 }
7368 return rc;
7369}
7370
7371
7372/**
7373 * Stores a data word.
7374 *
7375 * @returns Strict VBox status code.
7376 * @param pIemCpu The IEM per CPU data.
7377 * @param iSegReg The index of the segment register to use for
7378 * this access. The base and limits are checked.
7379 * @param GCPtrMem The address of the guest memory.
7380 * @param u16Value The value to store.
7381 */
7382IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7383{
7384 /* The lazy approach for now... */
7385 uint16_t *pu16Dst;
7386 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7387 if (rc == VINF_SUCCESS)
7388 {
7389 *pu16Dst = u16Value;
7390 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7391 }
7392 return rc;
7393}
7394
7395
7396/**
7397 * Stores a data dword.
7398 *
7399 * @returns Strict VBox status code.
7400 * @param pIemCpu The IEM per CPU data.
7401 * @param iSegReg The index of the segment register to use for
7402 * this access. The base and limits are checked.
7403 * @param GCPtrMem The address of the guest memory.
7404 * @param u32Value The value to store.
7405 */
7406IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7407{
7408 /* The lazy approach for now... */
7409 uint32_t *pu32Dst;
7410 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7411 if (rc == VINF_SUCCESS)
7412 {
7413 *pu32Dst = u32Value;
7414 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7415 }
7416 return rc;
7417}
7418
7419
7420/**
7421 * Stores a data qword.
7422 *
7423 * @returns Strict VBox status code.
7424 * @param pIemCpu The IEM per CPU data.
7425 * @param iSegReg The index of the segment register to use for
7426 * this access. The base and limits are checked.
7427 * @param GCPtrMem The address of the guest memory.
7428 * @param u64Value The value to store.
7429 */
7430IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7431{
7432 /* The lazy approach for now... */
7433 uint64_t *pu64Dst;
7434 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7435 if (rc == VINF_SUCCESS)
7436 {
7437 *pu64Dst = u64Value;
7438 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7439 }
7440 return rc;
7441}
7442
7443
7444/**
7445 * Stores a data dqword.
7446 *
7447 * @returns Strict VBox status code.
7448 * @param pIemCpu The IEM per CPU data.
7449 * @param iSegReg The index of the segment register to use for
7450 * this access. The base and limits are checked.
7451 * @param GCPtrMem The address of the guest memory.
7452 * @param u128Value The value to store.
7453 */
7454IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7455{
7456 /* The lazy approach for now... */
7457 uint128_t *pu128Dst;
7458 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7459 if (rc == VINF_SUCCESS)
7460 {
7461 *pu128Dst = u128Value;
7462 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7463 }
7464 return rc;
7465}
7466
7467
7468/**
7469 * Stores a data dqword, SSE aligned.
7470 *
7471 * @returns Strict VBox status code.
7472 * @param pIemCpu The IEM per CPU data.
7473 * @param iSegReg The index of the segment register to use for
7474 * this access. The base and limits are checked.
7475 * @param GCPtrMem The address of the guest memory.
7476 * @param u128Value The value to store.
7477 */
7478IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7479{
7480 /* The lazy approach for now... */
7481 if ( (GCPtrMem & 15)
7482 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7483 return iemRaiseGeneralProtectionFault0(pIemCpu);
7484
7485 uint128_t *pu128Dst;
7486 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7487 if (rc == VINF_SUCCESS)
7488 {
7489 *pu128Dst = u128Value;
7490 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7491 }
7492 return rc;
7493}
7494
7495
7496/**
7497 * Stores a descriptor register (sgdt, sidt).
7498 *
7499 * @returns Strict VBox status code.
7500 * @param pIemCpu The IEM per CPU data.
7501 * @param cbLimit The limit.
7502 * @param GCPtrBase The base address.
7503 * @param iSegReg The index of the segment register to use for
7504 * this access. The base and limits are checked.
7505 * @param GCPtrMem The address of the guest memory.
7506 * @param enmOpSize The effective operand size.
7507 */
7508IEM_STATIC VBOXSTRICTRC
7509iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7510{
7511 uint8_t *pu8Src;
7512 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7513 (void **)&pu8Src,
7514 enmOpSize == IEMMODE_64BIT
7515 ? 2 + 8
7516 : enmOpSize == IEMMODE_32BIT
7517 ? 2 + 4
7518 : 2 + 3,
7519 iSegReg,
7520 GCPtrMem,
7521 IEM_ACCESS_DATA_W);
7522 if (rcStrict == VINF_SUCCESS)
7523 {
7524 pu8Src[0] = RT_BYTE1(cbLimit);
7525 pu8Src[1] = RT_BYTE2(cbLimit);
7526 pu8Src[2] = RT_BYTE1(GCPtrBase);
7527 pu8Src[3] = RT_BYTE2(GCPtrBase);
7528 pu8Src[4] = RT_BYTE3(GCPtrBase);
7529 if (enmOpSize == IEMMODE_16BIT)
7530 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7531 else
7532 {
7533 pu8Src[5] = RT_BYTE4(GCPtrBase);
7534 if (enmOpSize == IEMMODE_64BIT)
7535 {
7536 pu8Src[6] = RT_BYTE5(GCPtrBase);
7537 pu8Src[7] = RT_BYTE6(GCPtrBase);
7538 pu8Src[8] = RT_BYTE7(GCPtrBase);
7539 pu8Src[9] = RT_BYTE8(GCPtrBase);
7540 }
7541 }
7542 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7543 }
7544 return rcStrict;
7545}
7546
7547
7548/**
7549 * Pushes a word onto the stack.
7550 *
7551 * @returns Strict VBox status code.
7552 * @param pIemCpu The IEM per CPU data.
7553 * @param u16Value The value to push.
7554 */
7555IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7556{
7557 /* Decrement the stack pointer. */
7558 uint64_t uNewRsp;
7559 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7560 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7561
7562 /* Write the word the lazy way. */
7563 uint16_t *pu16Dst;
7564 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7565 if (rc == VINF_SUCCESS)
7566 {
7567 *pu16Dst = u16Value;
7568 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7569 }
7570
7571 /* Commit the new RSP value unless an access handler made trouble. */
7572 if (rc == VINF_SUCCESS)
7573 pCtx->rsp = uNewRsp;
7574
7575 return rc;
7576}
7577
7578
7579/**
7580 * Pushes a dword onto the stack.
7581 *
7582 * @returns Strict VBox status code.
7583 * @param pIemCpu The IEM per CPU data.
7584 * @param u32Value The value to push.
7585 */
7586IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7587{
7588 /* Decrement the stack pointer. */
7589 uint64_t uNewRsp;
7590 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7591 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7592
7593 /* Write the dword the lazy way. */
7594 uint32_t *pu32Dst;
7595 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7596 if (rc == VINF_SUCCESS)
7597 {
7598 *pu32Dst = u32Value;
7599 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7600 }
7601
7602 /* Commit the new RSP value unless an access handler made trouble. */
7603 if (rc == VINF_SUCCESS)
7604 pCtx->rsp = uNewRsp;
7605
7606 return rc;
7607}
7608
7609
7610/**
7611 * Pushes a dword segment register value onto the stack.
7612 *
7613 * @returns Strict VBox status code.
7614 * @param pIemCpu The IEM per CPU data.
7615 * @param u32Value The value to push.
7616 */
7617IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7618{
7619 /* Decrement the stack pointer. */
7620 uint64_t uNewRsp;
7621 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7622 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7623
7624 VBOXSTRICTRC rc;
7625 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7626 {
7627 /* The recompiler writes a full dword. */
7628 uint32_t *pu32Dst;
7629 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7630 if (rc == VINF_SUCCESS)
7631 {
7632 *pu32Dst = u32Value;
7633 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7634 }
7635 }
7636 else
7637 {
7638 /* The Intel docs talk about zero extending the selector register
7639 value. My actual intel CPU here might be zero extending the value
7640 but it still only writes the lower word... */
7641 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7642 * happens when crossing an electric page boundary, is the high word
7643 * checked for write accessibility or not? Probably it is. What about
7644 * segment limits? */
7645 uint16_t *pu16Dst;
7646 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7647 if (rc == VINF_SUCCESS)
7648 {
7649 *pu16Dst = (uint16_t)u32Value;
7650 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7651 }
7652 }
7653
7654 /* Commit the new RSP value unless an access handler made trouble. */
7655 if (rc == VINF_SUCCESS)
7656 pCtx->rsp = uNewRsp;
7657
7658 return rc;
7659}
7660
7661
7662/**
7663 * Pushes a qword onto the stack.
7664 *
7665 * @returns Strict VBox status code.
7666 * @param pIemCpu The IEM per CPU data.
7667 * @param u64Value The value to push.
7668 */
7669IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7670{
7671 /* Decrement the stack pointer. */
7672 uint64_t uNewRsp;
7673 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7674 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7675
7676 /* Write the qword the lazy way. */
7677 uint64_t *pu64Dst;
7678 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7679 if (rc == VINF_SUCCESS)
7680 {
7681 *pu64Dst = u64Value;
7682 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7683 }
7684
7685 /* Commit the new RSP value unless an access handler made trouble. */
7686 if (rc == VINF_SUCCESS)
7687 pCtx->rsp = uNewRsp;
7688
7689 return rc;
7690}
7691
7692
7693/**
7694 * Pops a word from the stack.
7695 *
7696 * @returns Strict VBox status code.
7697 * @param pIemCpu The IEM per CPU data.
7698 * @param pu16Value Where to store the popped value.
7699 */
7700IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7701{
7702 /* Increment the stack pointer. */
7703 uint64_t uNewRsp;
7704 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7705 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7706
7707 /* Read the word the lazy way. */
7708 uint16_t const *pu16Src;
7709 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7710 if (rc == VINF_SUCCESS)
7711 {
7712 *pu16Value = *pu16Src;
7713 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7714
7715 /* Commit the new RSP value. */
7716 if (rc == VINF_SUCCESS)
7717 pCtx->rsp = uNewRsp;
7718 }
7719
7720 return rc;
7721}
7722
7723
7724/**
7725 * Pops a dword from the stack.
7726 *
7727 * @returns Strict VBox status code.
7728 * @param pIemCpu The IEM per CPU data.
7729 * @param pu32Value Where to store the popped value.
7730 */
7731IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7732{
7733 /* Increment the stack pointer. */
7734 uint64_t uNewRsp;
7735 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7736 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7737
7738 /* Read the dword the lazy way. */
7739 uint32_t const *pu32Src;
7740 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7741 if (rc == VINF_SUCCESS)
7742 {
7743 *pu32Value = *pu32Src;
7744 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7745
7746 /* Commit the new RSP value. */
7747 if (rc == VINF_SUCCESS)
7748 pCtx->rsp = uNewRsp;
7749 }
7750
7751 return rc;
7752}
7753
7754
7755/**
7756 * Pops a qword from the stack.
7757 *
7758 * @returns Strict VBox status code.
7759 * @param pIemCpu The IEM per CPU data.
7760 * @param pu64Value Where to store the popped value.
7761 */
7762IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7763{
7764 /* Increment the stack pointer. */
7765 uint64_t uNewRsp;
7766 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7767 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7768
7769 /* Read the qword the lazy way. */
7770 uint64_t const *pu64Src;
7771 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7772 if (rc == VINF_SUCCESS)
7773 {
7774 *pu64Value = *pu64Src;
7775 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7776
7777 /* Commit the new RSP value. */
7778 if (rc == VINF_SUCCESS)
7779 pCtx->rsp = uNewRsp;
7780 }
7781
7782 return rc;
7783}
7784
7785
7786/**
7787 * Pushes a word onto the stack, using a temporary stack pointer.
7788 *
7789 * @returns Strict VBox status code.
7790 * @param pIemCpu The IEM per CPU data.
7791 * @param u16Value The value to push.
7792 * @param pTmpRsp Pointer to the temporary stack pointer.
7793 */
7794IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7795{
7796 /* Decrement the stack pointer. */
7797 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7798 RTUINT64U NewRsp = *pTmpRsp;
7799 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7800
7801 /* Write the word the lazy way. */
7802 uint16_t *pu16Dst;
7803 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7804 if (rc == VINF_SUCCESS)
7805 {
7806 *pu16Dst = u16Value;
7807 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7808 }
7809
7810 /* Commit the new RSP value unless an access handler made trouble. */
7811 if (rc == VINF_SUCCESS)
7812 *pTmpRsp = NewRsp;
7813
7814 return rc;
7815}
7816
7817
7818/**
7819 * Pushes a dword onto the stack, using a temporary stack pointer.
7820 *
7821 * @returns Strict VBox status code.
7822 * @param pIemCpu The IEM per CPU data.
7823 * @param u32Value The value to push.
7824 * @param pTmpRsp Pointer to the temporary stack pointer.
7825 */
7826IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7827{
7828 /* Decrement the stack pointer. */
7829 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7830 RTUINT64U NewRsp = *pTmpRsp;
7831 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7832
7833 /* Write the dword the lazy way. */
7834 uint32_t *pu32Dst;
7835 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7836 if (rc == VINF_SUCCESS)
7837 {
7838 *pu32Dst = u32Value;
7839 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7840 }
7841
7842 /* Commit the new RSP value unless an access handler made trouble. */
7843 if (rc == VINF_SUCCESS)
7844 *pTmpRsp = NewRsp;
7845
7846 return rc;
7847}
7848
7849
7850/**
7851 * Pushes a qword onto the stack, using a temporary stack pointer.
7852 *
7853 * @returns Strict VBox status code.
7854 * @param pIemCpu The IEM per CPU data.
7855 * @param u64Value The value to push.
7856 * @param pTmpRsp Pointer to the temporary stack pointer.
7857 */
7858IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7859{
7860 /* Decrement the stack pointer. */
7861 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7862 RTUINT64U NewRsp = *pTmpRsp;
7863 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7864
7865 /* Write the qword the lazy way. */
7866 uint64_t *pu64Dst;
7867 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7868 if (rc == VINF_SUCCESS)
7869 {
7870 *pu64Dst = u64Value;
7871 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7872 }
7873
7874 /* Commit the new RSP value unless an access handler made trouble. */
7875 if (rc == VINF_SUCCESS)
7876 *pTmpRsp = NewRsp;
7877
7878 return rc;
7879}
7880
7881
7882/**
7883 * Pops a word from the stack, using a temporary stack pointer.
7884 *
7885 * @returns Strict VBox status code.
7886 * @param pIemCpu The IEM per CPU data.
7887 * @param pu16Value Where to store the popped value.
7888 * @param pTmpRsp Pointer to the temporary stack pointer.
7889 */
7890IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7891{
7892 /* Increment the stack pointer. */
7893 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7894 RTUINT64U NewRsp = *pTmpRsp;
7895 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7896
7897 /* Read the word the lazy way. */
7898 uint16_t const *pu16Src;
7899 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7900 if (rc == VINF_SUCCESS)
7901 {
7902 *pu16Value = *pu16Src;
7903 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7904
7905 /* Commit the new RSP value. */
7906 if (rc == VINF_SUCCESS)
7907 *pTmpRsp = NewRsp;
7908 }
7909
7910 return rc;
7911}
7912
7913
7914/**
7915 * Pops a dword from the stack, using a temporary stack pointer.
7916 *
7917 * @returns Strict VBox status code.
7918 * @param pIemCpu The IEM per CPU data.
7919 * @param pu32Value Where to store the popped value.
7920 * @param pTmpRsp Pointer to the temporary stack pointer.
7921 */
7922IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7923{
7924 /* Increment the stack pointer. */
7925 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7926 RTUINT64U NewRsp = *pTmpRsp;
7927 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7928
7929 /* Read the dword the lazy way. */
7930 uint32_t const *pu32Src;
7931 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7932 if (rc == VINF_SUCCESS)
7933 {
7934 *pu32Value = *pu32Src;
7935 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7936
7937 /* Commit the new RSP value. */
7938 if (rc == VINF_SUCCESS)
7939 *pTmpRsp = NewRsp;
7940 }
7941
7942 return rc;
7943}
7944
7945
7946/**
7947 * Pops a qword from the stack, using a temporary stack pointer.
7948 *
7949 * @returns Strict VBox status code.
7950 * @param pIemCpu The IEM per CPU data.
7951 * @param pu64Value Where to store the popped value.
7952 * @param pTmpRsp Pointer to the temporary stack pointer.
7953 */
7954IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7955{
7956 /* Increment the stack pointer. */
7957 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7958 RTUINT64U NewRsp = *pTmpRsp;
7959 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7960
7961 /* Read the qword the lazy way. */
7962 uint64_t const *pu64Src;
7963 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7964 if (rcStrict == VINF_SUCCESS)
7965 {
7966 *pu64Value = *pu64Src;
7967 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7968
7969 /* Commit the new RSP value. */
7970 if (rcStrict == VINF_SUCCESS)
7971 *pTmpRsp = NewRsp;
7972 }
7973
7974 return rcStrict;
7975}
7976
7977
7978/**
7979 * Begin a special stack push (used by interrupts, exceptions and such).
7980 *
7981 * This will raise \#SS or \#PF if appropriate.
7982 *
7983 * @returns Strict VBox status code.
7984 * @param pIemCpu The IEM per CPU data.
7985 * @param cbMem The number of bytes to push onto the stack.
7986 * @param ppvMem Where to return the pointer to the stack memory.
7987 * As with the other memory functions this could be
7988 * direct access or bounce buffered access, so
7989 * don't commit register until the commit call
7990 * succeeds.
7991 * @param puNewRsp Where to return the new RSP value. This must be
7992 * passed unchanged to
7993 * iemMemStackPushCommitSpecial().
7994 */
7995IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
7996{
7997 Assert(cbMem < UINT8_MAX);
7998 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7999 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8000 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8001}
8002
8003
8004/**
8005 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8006 *
8007 * This will update the rSP.
8008 *
8009 * @returns Strict VBox status code.
8010 * @param pIemCpu The IEM per CPU data.
8011 * @param pvMem The pointer returned by
8012 * iemMemStackPushBeginSpecial().
8013 * @param uNewRsp The new RSP value returned by
8014 * iemMemStackPushBeginSpecial().
8015 */
8016IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8017{
8018 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8019 if (rcStrict == VINF_SUCCESS)
8020 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8021 return rcStrict;
8022}
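
/*
 * Sketch of how the special push pair above is meant to be used, per their
 * descriptions (cbStackFrame stands in for the caller's frame size and the
 * fill step depends on the caller):
 *
 *     void        *pvStackFrame;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame,
 *                                                         &pvStackFrame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ... fill in pvStackFrame (it may be a bounce buffer) ...
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvStackFrame, uNewRsp);
 */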
8023
8024
8025/**
8026 * Begin a special stack pop (used by iret, retf and such).
8027 *
8028 * This will raise \#SS or \#PF if appropriate.
8029 *
8030 * @returns Strict VBox status code.
8031 * @param pIemCpu The IEM per CPU data.
8032 * @param cbMem The number of bytes to pop from the stack.
8033 * @param ppvMem Where to return the pointer to the stack memory.
8034 * @param puNewRsp Where to return the new RSP value. This must be
8035 * passed unchanged to
8036 * iemMemStackPopCommitSpecial() or applied
8037 * manually if iemMemStackPopDoneSpecial() is used.
8038 */
8039IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8040{
8041 Assert(cbMem < UINT8_MAX);
8042 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8043 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8044 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8045}
8046
8047
8048/**
8049 * Continue a special stack pop (used by iret and retf).
8050 *
8051 * This will raise \#SS or \#PF if appropriate.
8052 *
8053 * @returns Strict VBox status code.
8054 * @param pIemCpu The IEM per CPU data.
8055 * @param   cbMem               The number of bytes to pop off the stack.
8056 * @param ppvMem Where to return the pointer to the stack memory.
8057 * @param puNewRsp Where to return the new RSP value. This must be
8058 * passed unchanged to
8059 * iemMemStackPopCommitSpecial() or applied
8060 * manually if iemMemStackPopDoneSpecial() is used.
8061 */
8062IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8063{
8064 Assert(cbMem < UINT8_MAX);
8065 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8066 RTUINT64U NewRsp;
8067 NewRsp.u = *puNewRsp;
8068 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8069 *puNewRsp = NewRsp.u;
8070 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8071}
8072
8073
8074/**
8075 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8076 *
8077 * This will update the rSP.
8078 *
8079 * @returns Strict VBox status code.
8080 * @param pIemCpu The IEM per CPU data.
8081 * @param pvMem The pointer returned by
8082 * iemMemStackPopBeginSpecial().
8083 * @param uNewRsp The new RSP value returned by
8084 * iemMemStackPopBeginSpecial().
8085 */
8086IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8087{
8088 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8089 if (rcStrict == VINF_SUCCESS)
8090 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8091 return rcStrict;
8092}
8093
8094
8095/**
8096 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8097 * iemMemStackPopContinueSpecial).
8098 *
8099 * The caller will manually commit the rSP.
8100 *
8101 * @returns Strict VBox status code.
8102 * @param pIemCpu The IEM per CPU data.
8103 * @param pvMem The pointer returned by
8104 * iemMemStackPopBeginSpecial() or
8105 * iemMemStackPopContinueSpecial().
8106 */
8107IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8108{
8109 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8110}
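
/*
 * Illustrative usage sketch (hypothetical locals, not copied from a caller):
 * iret/retf style code pairs the pop helpers like this, applying the new RSP
 * manually when the *Done* variant is used:
 *
 *     uint16_t const *pu16Frame;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, (void const **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint16_t const uNewIp    = pu16Frame[0];
 *     uint16_t const uNewCs    = pu16Frame[1];
 *     uint16_t const uNewFlags = pu16Frame[2];
 *     rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu16Frame);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ... validate uNewCs, load the segment, then store uNewRsp into pCtx->rsp ...
 */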
8111
8112
8113/**
8114 * Fetches a system table byte.
8115 *
8116 * @returns Strict VBox status code.
8117 * @param pIemCpu The IEM per CPU data.
8118 * @param pbDst Where to return the byte.
8119 * @param iSegReg The index of the segment register to use for
8120 * this access. The base and limits are checked.
8121 * @param GCPtrMem The address of the guest memory.
8122 */
8123IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8124{
8125 /* The lazy approach for now... */
8126 uint8_t const *pbSrc;
8127 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8128 if (rc == VINF_SUCCESS)
8129 {
8130 *pbDst = *pbSrc;
8131 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8132 }
8133 return rc;
8134}
8135
8136
8137/**
8138 * Fetches a system table word.
8139 *
8140 * @returns Strict VBox status code.
8141 * @param pIemCpu The IEM per CPU data.
8142 * @param pu16Dst Where to return the word.
8143 * @param iSegReg The index of the segment register to use for
8144 * this access. The base and limits are checked.
8145 * @param GCPtrMem The address of the guest memory.
8146 */
8147IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8148{
8149 /* The lazy approach for now... */
8150 uint16_t const *pu16Src;
8151 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8152 if (rc == VINF_SUCCESS)
8153 {
8154 *pu16Dst = *pu16Src;
8155 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8156 }
8157 return rc;
8158}
8159
8160
8161/**
8162 * Fetches a system table dword.
8163 *
8164 * @returns Strict VBox status code.
8165 * @param pIemCpu The IEM per CPU data.
8166 * @param pu32Dst Where to return the dword.
8167 * @param iSegReg The index of the segment register to use for
8168 * this access. The base and limits are checked.
8169 * @param GCPtrMem The address of the guest memory.
8170 */
8171IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8172{
8173 /* The lazy approach for now... */
8174 uint32_t const *pu32Src;
8175 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8176 if (rc == VINF_SUCCESS)
8177 {
8178 *pu32Dst = *pu32Src;
8179 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8180 }
8181 return rc;
8182}
8183
8184
8185/**
8186 * Fetches a system table qword.
8187 *
8188 * @returns Strict VBox status code.
8189 * @param pIemCpu The IEM per CPU data.
8190 * @param pu64Dst Where to return the qword.
8191 * @param iSegReg The index of the segment register to use for
8192 * this access. The base and limits are checked.
8193 * @param GCPtrMem The address of the guest memory.
8194 */
8195IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8196{
8197 /* The lazy approach for now... */
8198 uint64_t const *pu64Src;
8199 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8200 if (rc == VINF_SUCCESS)
8201 {
8202 *pu64Dst = *pu64Src;
8203 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8204 }
8205 return rc;
8206}
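
/*
 * Usage note (informal observation): callers such as the descriptor fetch
 * below pass UINT8_MAX as iSegReg when GCPtrMem is already a linear address,
 * so that no segment base or limit is applied by iemMemMap.
 */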
8207
8208
8209/**
8210 * Fetches a descriptor table entry with caller specified error code.
8211 *
8212 * @returns Strict VBox status code.
8213 * @param pIemCpu The IEM per CPU.
8214 * @param pDesc Where to return the descriptor table entry.
8215 * @param   uSel                The selector whose table entry to fetch.
8216 * @param uXcpt The exception to raise on table lookup error.
8217 * @param uErrorCode The error code associated with the exception.
8218 */
8219IEM_STATIC VBOXSTRICTRC
8220iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8221{
8222 AssertPtr(pDesc);
8223 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8224
8225 /** @todo did the 286 require all 8 bytes to be accessible? */
8226 /*
8227 * Get the selector table base and check bounds.
8228 */
8229 RTGCPTR GCPtrBase;
8230 if (uSel & X86_SEL_LDT)
8231 {
8232 if ( !pCtx->ldtr.Attr.n.u1Present
8233 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8234 {
8235 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8236 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8237 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8238 uErrorCode, 0);
8239 }
8240
8241 Assert(pCtx->ldtr.Attr.n.u1Present);
8242 GCPtrBase = pCtx->ldtr.u64Base;
8243 }
8244 else
8245 {
8246 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8247 {
8248 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8249 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8250 uErrorCode, 0);
8251 }
8252 GCPtrBase = pCtx->gdtr.pGdt;
8253 }
8254
8255 /*
8256 * Read the legacy descriptor and maybe the long mode extensions if
8257 * required.
8258 */
8259 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8260 if (rcStrict == VINF_SUCCESS)
8261 {
8262 if ( !IEM_IS_LONG_MODE(pIemCpu)
8263 || pDesc->Legacy.Gen.u1DescType)
8264 pDesc->Long.au64[1] = 0;
8265 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8266 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8267 else
8268 {
8269 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8270 /** @todo is this the right exception? */
8271 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8272 }
8273 }
8274 return rcStrict;
8275}
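
/*
 * Worked example (illustrative only): for GDT selector 0x001b (RPL=3, TI=0)
 * the entry is read from gdtr.pGdt + (0x001b & X86_SEL_MASK) = gdtr.pGdt + 0x18,
 * i.e. the fourth 8-byte descriptor, while the bounds check above uses
 * (uSel | 7) so that all 8 bytes of the entry must lie within gdtr.cbGdt.
 */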
8276
8277
8278/**
8279 * Fetches a descriptor table entry.
8280 *
8281 * @returns Strict VBox status code.
8282 * @param pIemCpu The IEM per CPU.
8283 * @param pDesc Where to return the descriptor table entry.
8284 * @param   uSel                The selector whose table entry to fetch.
8285 * @param uXcpt The exception to raise on table lookup error.
8286 */
8287IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8288{
8289 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8290}
8291
8292
8293/**
8294 * Fakes a long mode stack selector for SS = 0.
8295 *
8296 * @param pDescSs Where to return the fake stack descriptor.
8297 * @param uDpl The DPL we want.
8298 */
8299IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8300{
8301 pDescSs->Long.au64[0] = 0;
8302 pDescSs->Long.au64[1] = 0;
8303 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8304 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8305 pDescSs->Long.Gen.u2Dpl = uDpl;
8306 pDescSs->Long.Gen.u1Present = 1;
8307 pDescSs->Long.Gen.u1Long = 1;
8308}
8309
8310
8311/**
8312 * Marks the selector descriptor as accessed (only non-system descriptors).
8313 *
8314 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8315 * will therefore skip the limit checks.
8316 *
8317 * @returns Strict VBox status code.
8318 * @param pIemCpu The IEM per CPU.
8319 * @param uSel The selector.
8320 */
8321IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8322{
8323 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8324
8325 /*
8326 * Get the selector table base and calculate the entry address.
8327 */
8328 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8329 ? pCtx->ldtr.u64Base
8330 : pCtx->gdtr.pGdt;
8331 GCPtr += uSel & X86_SEL_MASK;
8332
8333 /*
8334 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8335 * ugly stuff to avoid this. This will make sure it's an atomic access
8336 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8337 */
8338 VBOXSTRICTRC rcStrict;
8339 uint32_t volatile *pu32;
8340 if ((GCPtr & 3) == 0)
8341 {
8342        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8343 GCPtr += 2 + 2;
8344 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8345 if (rcStrict != VINF_SUCCESS)
8346 return rcStrict;
8347        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8348 }
8349 else
8350 {
8351 /* The misaligned GDT/LDT case, map the whole thing. */
8352 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8353 if (rcStrict != VINF_SUCCESS)
8354 return rcStrict;
8355 switch ((uintptr_t)pu32 & 3)
8356 {
8357 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8358 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8359 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8360 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8361 }
8362 }
8363
8364 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8365}
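
/*
 * Note on the bit arithmetic above (informal): the accessed bit is bit 40 of
 * the 8-byte descriptor, i.e. bit 0 of the type field in byte 5.  Mapping the
 * aligned dword at offset 4 therefore places it at bit 8 (byte 5 is byte 1 of
 * that dword), which is what the ASMAtomicBitSet(pu32, 8) call sets; the
 * misaligned cases re-derive the same bit from whatever alignment the mapping
 * happened to return.
 */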
8366
8367/** @} */
8368
8369
8370/*
8371 * Include the C/C++ implementation of instructions.
8372 */
8373#include "IEMAllCImpl.cpp.h"
8374
8375
8376
8377/** @name "Microcode" macros.
8378 *
8379 * The idea is that we should be able to use the same code to interpret
8380 * instructions as well as to recompile them.  Thus this obfuscation.
8381 *
8382 * @{
8383 */
8384#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8385#define IEM_MC_END() }
8386#define IEM_MC_PAUSE() do {} while (0)
8387#define IEM_MC_CONTINUE() do {} while (0)
8388
8389/** Internal macro. */
8390#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8391 do \
8392 { \
8393 VBOXSTRICTRC rcStrict2 = a_Expr; \
8394 if (rcStrict2 != VINF_SUCCESS) \
8395 return rcStrict2; \
8396 } while (0)
8397
8398#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8399#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8400#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8401#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8402#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8403#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8404#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8405
8406#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8407#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8408 do { \
8409 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8410 return iemRaiseDeviceNotAvailable(pIemCpu); \
8411 } while (0)
8412#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8413 do { \
8414 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8415 return iemRaiseMathFault(pIemCpu); \
8416 } while (0)
8417#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8418 do { \
8419 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8420 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8421 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8422 return iemRaiseUndefinedOpcode(pIemCpu); \
8423 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8424 return iemRaiseDeviceNotAvailable(pIemCpu); \
8425 } while (0)
8426#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8427 do { \
8428 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8429 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8430 return iemRaiseUndefinedOpcode(pIemCpu); \
8431 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8432 return iemRaiseDeviceNotAvailable(pIemCpu); \
8433 } while (0)
8434#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8435 do { \
8436 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8437 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8438 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8439 return iemRaiseUndefinedOpcode(pIemCpu); \
8440 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8441 return iemRaiseDeviceNotAvailable(pIemCpu); \
8442 } while (0)
8443#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8444 do { \
8445 if (pIemCpu->uCpl != 0) \
8446 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8447 } while (0)
8448
8449
8450#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8451#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8452#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8453#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8454#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8455#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8456#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8457 uint32_t a_Name; \
8458 uint32_t *a_pName = &a_Name
8459#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8460 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8461
8462#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8463#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8464
8465#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8466#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8467#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8468#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8469#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8470#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8471#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8472#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8473#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8474#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8475#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8476#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8477#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8478#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8479#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8480#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8481#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8482#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8483#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8484#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8485#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8486#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8487#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8488#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8489#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8490#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8491#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8492#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8493#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8494/** @note Not for IOPL or IF testing or modification. */
8495#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8496#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8497#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8498#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8499
8500#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8501#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8502#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8503#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8504#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8505#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8506#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8507#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8508#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8509#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8510#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8511 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8512
8513#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8514#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8515/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8516 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8517#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8518#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8519/** @note Not for IOPL or IF testing or modification. */
8520#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8521
8522#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8523#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8524#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8525 do { \
8526 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8527 *pu32Reg += (a_u32Value); \
8528        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8529 } while (0)
8530#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8531
8532#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8533#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8534#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8535 do { \
8536 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8537 *pu32Reg -= (a_u32Value); \
8538        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8539 } while (0)
8540#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8541
8542#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8543#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8544#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8545#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8546#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8547#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8548#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8549
8550#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8551#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8552#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8553#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8554
8555#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8556#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8557#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8558
8559#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8560#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8561
8562#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8563#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8564#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8565
8566#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8567#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8568#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8569
8570#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8571
8572#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8573
8574#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8575#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8576#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8577 do { \
8578 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8579 *pu32Reg &= (a_u32Value); \
8580        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8581 } while (0)
8582#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8583
8584#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8585#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8586#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8587 do { \
8588 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8589 *pu32Reg |= (a_u32Value); \
8590        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8591 } while (0)
8592#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8593
8594
8595/** @note Not for IOPL or IF modification. */
8596#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8597/** @note Not for IOPL or IF modification. */
8598#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8599/** @note Not for IOPL or IF modification. */
8600#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8601
8602#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8603
8604
8605#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8606 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8607#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8608 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8609#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8610 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8611#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8612 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8613#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8614 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8615#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8616 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8617#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8618 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8619
8620#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8621 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8622#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8623 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8624#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8625 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8626#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8627 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8628#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8629 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8630 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8631 } while (0)
8632#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8633 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8634 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8635 } while (0)
8636#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8637 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8638#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8639 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8640#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8641 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8642
8643#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8644 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8645#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8646 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8647#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8649
8650#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8651 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8652#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8653 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8654#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8655 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8656
8657#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8658 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8659#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8661#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8662 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8663
8664#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8666
8667#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8669#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8670 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8671#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8673#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8674 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8675
8676#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8678#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8680#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8682
8683#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8685#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8687
8688
8689
8690#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8691 do { \
8692 uint8_t u8Tmp; \
8693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8694 (a_u16Dst) = u8Tmp; \
8695 } while (0)
8696#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8697 do { \
8698 uint8_t u8Tmp; \
8699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8700 (a_u32Dst) = u8Tmp; \
8701 } while (0)
8702#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8703 do { \
8704 uint8_t u8Tmp; \
8705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8706 (a_u64Dst) = u8Tmp; \
8707 } while (0)
8708#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8709 do { \
8710 uint16_t u16Tmp; \
8711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8712 (a_u32Dst) = u16Tmp; \
8713 } while (0)
8714#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8715 do { \
8716 uint16_t u16Tmp; \
8717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8718 (a_u64Dst) = u16Tmp; \
8719 } while (0)
8720#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8721 do { \
8722 uint32_t u32Tmp; \
8723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8724 (a_u64Dst) = u32Tmp; \
8725 } while (0)
8726
8727#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8728 do { \
8729 uint8_t u8Tmp; \
8730 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8731 (a_u16Dst) = (int8_t)u8Tmp; \
8732 } while (0)
8733#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8734 do { \
8735 uint8_t u8Tmp; \
8736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8737 (a_u32Dst) = (int8_t)u8Tmp; \
8738 } while (0)
8739#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8740 do { \
8741 uint8_t u8Tmp; \
8742 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8743 (a_u64Dst) = (int8_t)u8Tmp; \
8744 } while (0)
8745#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8746 do { \
8747 uint16_t u16Tmp; \
8748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8749 (a_u32Dst) = (int16_t)u16Tmp; \
8750 } while (0)
8751#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8752 do { \
8753 uint16_t u16Tmp; \
8754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8755 (a_u64Dst) = (int16_t)u16Tmp; \
8756 } while (0)
8757#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8758 do { \
8759 uint32_t u32Tmp; \
8760 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8761 (a_u64Dst) = (int32_t)u32Tmp; \
8762 } while (0)
8763
8764#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8765 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8766#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8767 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8768#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8769 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8770#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8771 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8772
8773#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8774 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8775#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8776 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8777#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8778 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8779#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8780 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8781
8782#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8783#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8784#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8785#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8786#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8787#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8788#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8789 do { \
8790 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8791 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8792 } while (0)
8793
8794#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8795 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8796#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8797 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8798
8799
8800#define IEM_MC_PUSH_U16(a_u16Value) \
8801 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8802#define IEM_MC_PUSH_U32(a_u32Value) \
8803 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8804#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8805 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8806#define IEM_MC_PUSH_U64(a_u64Value) \
8807 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8808
8809#define IEM_MC_POP_U16(a_pu16Value) \
8810 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8811#define IEM_MC_POP_U32(a_pu32Value) \
8812 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8813#define IEM_MC_POP_U64(a_pu64Value) \
8814 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8815
8816/** Maps guest memory for direct or bounce buffered access.
8817 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8818 * @remarks May return.
8819 */
8820#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8821 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8822
8823/** Maps guest memory for direct or bounce buffered access.
8824 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8825 * @remarks May return.
8826 */
8827#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8828 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8829
8830/** Commits the memory and unmaps the guest memory.
8831 * @remarks May return.
8832 */
8833#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8834 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
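
/* Illustrative sketch (hypothetical operand names, not lifted from a decoder):
 * a read-modify-write memory operand pairs the two macros above like this:
 *
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *
 * Both macros may return from the enclosing function on failure, so the caller
 * must not have anything that needs undoing at those points.
 */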
8835
8836/** Commits the memory and unmaps the guest memory unless the FPU status word
8837 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8838 * would cause FLD not to store.
8839 *
8840 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8841 * store, while \#P will not.
8842 *
8843 * @remarks May in theory return - for now.
8844 */
8845#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8846 do { \
8847 if ( !(a_u16FSW & X86_FSW_ES) \
8848 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8849 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8850 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8851 } while (0)
8852
8853/** Calculate effective address from R/M. */
8854#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8855 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8856
8857#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8858#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8859#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8860#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8861#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8862#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8863#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8864
8865/**
8866 * Defers the rest of the instruction emulation to a C implementation routine
8867 * and returns, only taking the standard parameters.
8868 *
8869 * @param a_pfnCImpl The pointer to the C routine.
8870 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8871 */
8872#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8873
8874/**
8875 * Defers the rest of instruction emulation to a C implementation routine and
8876 * returns, taking one argument in addition to the standard ones.
8877 *
8878 * @param a_pfnCImpl The pointer to the C routine.
8879 * @param a0 The argument.
8880 */
8881#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8882
8883/**
8884 * Defers the rest of the instruction emulation to a C implementation routine
8885 * and returns, taking two arguments in addition to the standard ones.
8886 *
8887 * @param a_pfnCImpl The pointer to the C routine.
8888 * @param a0 The first extra argument.
8889 * @param a1 The second extra argument.
8890 */
8891#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8892
8893/**
8894 * Defers the rest of the instruction emulation to a C implementation routine
8895 * and returns, taking three arguments in addition to the standard ones.
8896 *
8897 * @param a_pfnCImpl The pointer to the C routine.
8898 * @param a0 The first extra argument.
8899 * @param a1 The second extra argument.
8900 * @param a2 The third extra argument.
8901 */
8902#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8903
8904/**
8905 * Defers the rest of the instruction emulation to a C implementation routine
8906 * and returns, taking four arguments in addition to the standard ones.
8907 *
8908 * @param a_pfnCImpl The pointer to the C routine.
8909 * @param a0 The first extra argument.
8910 * @param a1 The second extra argument.
8911 * @param a2 The third extra argument.
8912 * @param a3 The fourth extra argument.
8913 */
8914#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8915
8916/**
8917 * Defers the rest of the instruction emulation to a C implementation routine
8918 * and returns, taking five arguments in addition to the standard ones.
8919 *
8920 * @param a_pfnCImpl The pointer to the C routine.
8921 * @param a0 The first extra argument.
8922 * @param a1 The second extra argument.
8923 * @param a2 The third extra argument.
8924 * @param a3 The fourth extra argument.
8925 * @param a4 The fifth extra argument.
8926 */
8927#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
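
/* Illustrative sketch (assumed context): a decoder function that has gathered
 * its operands typically ends its IEM_MC block with one of the CIMPL calls,
 * e.g. for a 16-bit far jump something along the lines of:
 *
 *     IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, IEMMODE_16BIT);
 *
 * Since the macro expands to a return statement, it must be the last action of
 * the block.
 */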
8928
8929/**
8930 * Defers the entire instruction emulation to a C implementation routine and
8931 * returns, only taking the standard parameters.
8932 *
8933 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8934 *
8935 * @param a_pfnCImpl The pointer to the C routine.
8936 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8937 */
8938#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8939
8940/**
8941 * Defers the entire instruction emulation to a C implementation routine and
8942 * returns, taking one argument in addition to the standard ones.
8943 *
8944 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8945 *
8946 * @param a_pfnCImpl The pointer to the C routine.
8947 * @param a0 The argument.
8948 */
8949#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8950
8951/**
8952 * Defers the entire instruction emulation to a C implementation routine and
8953 * returns, taking two arguments in addition to the standard ones.
8954 *
8955 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8956 *
8957 * @param a_pfnCImpl The pointer to the C routine.
8958 * @param a0 The first extra argument.
8959 * @param a1 The second extra argument.
8960 */
8961#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8962
8963/**
8964 * Defers the entire instruction emulation to a C implementation routine and
8965 * returns, taking three arguments in addition to the standard ones.
8966 *
8967 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8968 *
8969 * @param a_pfnCImpl The pointer to the C routine.
8970 * @param a0 The first extra argument.
8971 * @param a1 The second extra argument.
8972 * @param a2 The third extra argument.
8973 */
8974#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8975
8976/**
8977 * Calls a FPU assembly implementation taking one visible argument.
8978 *
8979 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8980 * @param a0 The first extra argument.
8981 */
8982#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
8983 do { \
8984 iemFpuPrepareUsage(pIemCpu); \
8985 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
8986 } while (0)
8987
8988/**
8989 * Calls a FPU assembly implementation taking two visible arguments.
8990 *
8991 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8992 * @param a0 The first extra argument.
8993 * @param a1 The second extra argument.
8994 */
8995#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
8996 do { \
8997 iemFpuPrepareUsage(pIemCpu); \
8998 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
8999 } while (0)
9000
9001/**
9002 * Calls a FPU assembly implementation taking three visible arguments.
9003 *
9004 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9005 * @param a0 The first extra argument.
9006 * @param a1 The second extra argument.
9007 * @param a2 The third extra argument.
9008 */
9009#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9010 do { \
9011 iemFpuPrepareUsage(pIemCpu); \
9012 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9013 } while (0)
9014
9015#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9016 do { \
9017 (a_FpuData).FSW = (a_FSW); \
9018 (a_FpuData).r80Result = *(a_pr80Value); \
9019 } while (0)
9020
9021/** Pushes FPU result onto the stack. */
9022#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9023 iemFpuPushResult(pIemCpu, &a_FpuData)
9024/** Pushes FPU result onto the stack and sets the FPUDP. */
9025#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9026 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9027
9028/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
9029#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9030 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9031
9032/** Stores FPU result in a stack register. */
9033#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9034 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9035/** Stores FPU result in a stack register and pops the stack. */
9036#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9037 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9038/** Stores FPU result in a stack register and sets the FPUDP. */
9039#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9040 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9041/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9042 * stack. */
9043#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9044 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
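
/* Illustrative sketch (hypothetical, fadd-style): a register-register FPU
 * instruction typically combines the worker call and the result macros above
 * roughly as follows, with pr80Value1/pr80Value2 obtained via the
 * IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80 test further down:
 *
 *     IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *     IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *     ...
 *     IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *     IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     ...
 */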
9045
9046/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9047#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9048 iemFpuUpdateOpcodeAndIp(pIemCpu)
9049/** Free a stack register (for FFREE and FFREEP). */
9050#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9051 iemFpuStackFree(pIemCpu, a_iStReg)
9052/** Increment the FPU stack pointer. */
9053#define IEM_MC_FPU_STACK_INC_TOP() \
9054 iemFpuStackIncTop(pIemCpu)
9055/** Decrement the FPU stack pointer. */
9056#define IEM_MC_FPU_STACK_DEC_TOP() \
9057 iemFpuStackDecTop(pIemCpu)
9058
9059/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9060#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9061 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9062/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9063#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9064 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9065/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9066#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9067 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9068/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9069#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9070 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9071/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9072 * stack. */
9073#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9074 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9075/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9076#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9077    iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9078
9079/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9080#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9081 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9082/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9083 * stack. */
9084#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9085 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9086/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9087 * FPUDS. */
9088#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9089 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9090/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9091 * FPUDS. Pops stack. */
9092#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9093 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9094/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9095 * stack twice. */
9096#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9097 iemFpuStackUnderflowThenPopPop(pIemCpu)
9098/** Raises a FPU stack underflow exception for an instruction pushing a result
9099 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9100#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9101 iemFpuStackPushUnderflow(pIemCpu)
9102/** Raises a FPU stack underflow exception for an instruction pushing a result
9103 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9104#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9105 iemFpuStackPushUnderflowTwo(pIemCpu)
9106
9107/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9108 * FPUIP, FPUCS and FOP. */
9109#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9110 iemFpuStackPushOverflow(pIemCpu)
9111/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9112 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9113#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9114 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9115/** Indicates that we (might) have modified the FPU state. */
9116#define IEM_MC_USED_FPU() \
9117 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9118
9119/**
9120 * Calls a MMX assembly implementation taking two visible arguments.
9121 *
9122 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9123 * @param a0 The first extra argument.
9124 * @param a1 The second extra argument.
9125 */
9126#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9127 do { \
9128 iemFpuPrepareUsage(pIemCpu); \
9129 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9130 } while (0)
9131
9132/**
9133 * Calls a MMX assembly implementation taking three visible arguments.
9134 *
9135 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9136 * @param a0 The first extra argument.
9137 * @param a1 The second extra argument.
9138 * @param a2 The third extra argument.
9139 */
9140#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9141 do { \
9142 iemFpuPrepareUsage(pIemCpu); \
9143 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9144 } while (0)
9145
9146
9147/**
9148 * Calls a SSE assembly implementation taking two visible arguments.
9149 *
9150 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9151 * @param a0 The first extra argument.
9152 * @param a1 The second extra argument.
9153 */
9154#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9155 do { \
9156 iemFpuPrepareUsageSse(pIemCpu); \
9157 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9158 } while (0)
9159
9160/**
9161 * Calls a SSE assembly implementation taking three visible arguments.
9162 *
9163 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9164 * @param a0 The first extra argument.
9165 * @param a1 The second extra argument.
9166 * @param a2 The third extra argument.
9167 */
9168#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9169 do { \
9170 iemFpuPrepareUsageSse(pIemCpu); \
9171 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9172 } while (0)
9173
9174
9175/** @note Not for IOPL or IF testing. */
9176#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9177/** @note Not for IOPL or IF testing. */
9178#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9179/** @note Not for IOPL or IF testing. */
9180#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9181/** @note Not for IOPL or IF testing. */
9182#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9183/** @note Not for IOPL or IF testing. */
9184#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9185 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9186 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9187/** @note Not for IOPL or IF testing. */
9188#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9189 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9190 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9191/** @note Not for IOPL or IF testing. */
9192#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9193 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9194 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9195 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9196/** @note Not for IOPL or IF testing. */
9197#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9198 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9199 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9200 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9201#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9202#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9203#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9204/** @note Not for IOPL or IF testing. */
9205#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9206 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9207 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9208/** @note Not for IOPL or IF testing. */
9209#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9210 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9211 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9212/** @note Not for IOPL or IF testing. */
9213#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9214 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9215 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9216/** @note Not for IOPL or IF testing. */
9217#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9218 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9219 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9220/** @note Not for IOPL or IF testing. */
9221#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9222 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9223 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9224/** @note Not for IOPL or IF testing. */
9225#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9226 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9227 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9228#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9229#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9230#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9231 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9232#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9233 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9234#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9235 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9236#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9237 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9238#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9239 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9240#define IEM_MC_IF_FCW_IM() \
9241 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9242
9243#define IEM_MC_ELSE() } else {
9244#define IEM_MC_ENDIF() } do {} while (0)
9245
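/*
 * Illustrative example: the IEM_MC_IF_* macros above open a brace which
 * IEM_MC_ELSE() continues and IEM_MC_ENDIF() closes, so a conditional body in
 * an instruction implementation is written like this (the flag constant and
 * the statements are placeholders):
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          ... statements for the taken path ...
 *      IEM_MC_ELSE()
 *          ... statements for the not-taken path ...
 *      IEM_MC_ENDIF();
 */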
9246/** @} */
9247
9248
9249/** @name Opcode Debug Helpers.
9250 * @{
9251 */
9252#ifdef DEBUG
9253# define IEMOP_MNEMONIC(a_szMnemonic) \
9254 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9255 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9256# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9257 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9258 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9259#else
9260# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9261# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9262#endif
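/*
 * In debug builds with Log4 enabled the IEMOP_MNEMONIC macros above produce
 * decode lines along the lines of (the values are illustrative only):
 *
 *      decode - 0008:ffffffff81234567 lock xchg [#42]
 *
 * i.e. CS:RIP, an optional "lock " marker, the mnemonic (plus operands for the
 * two-argument variant) and the instruction count.
 */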
9263
9264/** @} */
9265
9266
9267/** @name Opcode Helpers.
9268 * @{
9269 */
9270
9271/** The instruction raises an \#UD in real and V8086 mode. */
9272#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9273 do \
9274 { \
9275 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9276 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9277 } while (0)
9278
9279/** The instruction allows no lock prefixing (in this encoding); throws \#UD if
9280 * lock prefixed.
9281 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9282#define IEMOP_HLP_NO_LOCK_PREFIX() \
9283 do \
9284 { \
9285 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9286 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9287 } while (0)
9288
9289/** The instruction is not available in 64-bit mode; throws \#UD if we're in
9290 * 64-bit mode. */
9291#define IEMOP_HLP_NO_64BIT() \
9292 do \
9293 { \
9294 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9295 return IEMOP_RAISE_INVALID_OPCODE(); \
9296 } while (0)
9297
9298/** The instruction is only available in 64-bit mode; throws \#UD if we're not in
9299 * 64-bit mode. */
9300#define IEMOP_HLP_ONLY_64BIT() \
9301 do \
9302 { \
9303 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9304 return IEMOP_RAISE_INVALID_OPCODE(); \
9305 } while (0)
9306
9307/** The instruction defaults to 64-bit operand size in 64-bit mode. */
9308#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9309 do \
9310 { \
9311 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9312 iemRecalEffOpSize64Default(pIemCpu); \
9313 } while (0)
9314
9315/** The instruction has 64-bit operand size in 64-bit mode. */
9316#define IEMOP_HLP_64BIT_OP_SIZE() \
9317 do \
9318 { \
9319 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9320 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9321 } while (0)
9322
9323/** Only a REX prefix immediately preceding the first opcode byte takes
9324 * effect. This macro helps ensure this and logs bad guest code. */
9325#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9326 do \
9327 { \
9328 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9329 { \
9330 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9331 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9332 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9333 pIemCpu->uRexB = 0; \
9334 pIemCpu->uRexIndex = 0; \
9335 pIemCpu->uRexReg = 0; \
9336 iemRecalEffOpSize(pIemCpu); \
9337 } \
9338 } while (0)
9339
9340/**
9341 * Done decoding.
9342 */
9343#define IEMOP_HLP_DONE_DECODING() \
9344 do \
9345 { \
9346 /*nothing for now, maybe later... */ \
9347 } while (0)
9348
9349/**
9350 * Done decoding, raise \#UD exception if lock prefix present.
9351 */
9352#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9353 do \
9354 { \
9355 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9356 { /* likely */ } \
9357 else \
9358 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9359 } while (0)
9360#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9361 do \
9362 { \
9363 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9364 { /* likely */ } \
9365 else \
9366 { \
9367 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9368 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9369 } \
9370 } while (0)
9371#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9372 do \
9373 { \
9374 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9375 { /* likely */ } \
9376 else \
9377 { \
9378 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9379 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9380 } \
9381 } while (0)
9382/**
9383 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9384 * are present.
9385 */
9386#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9387 do \
9388 { \
9389 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9390 { /* likely */ } \
9391 else \
9392 return IEMOP_RAISE_INVALID_OPCODE(); \
9393 } while (0)
9394
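/*
 * Illustrative sketch only - not one of the real opcode handlers: a typical
 * handler in IEMAllInstructions.cpp.h strings the helpers above together
 * roughly as follows (the mnemonic and the instruction body are made up):
 *
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          IEMOP_MNEMONIC("example");
 *          IEMOP_HLP_NO_64BIT();                      // #UD in long mode
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();  // #UD on a lock prefix
 *          ... IEM_MC_* statements implementing the instruction ...
 *      }
 */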
9395
9396/**
9397 * Calculates the effective address of a ModR/M memory operand.
9398 *
9399 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9400 *
9401 * @return Strict VBox status code.
9402 * @param pIemCpu The IEM per CPU data.
9403 * @param bRm The ModRM byte.
9404 * @param cbImm The size of any immediate following the
9405 * effective address opcode bytes. Important for
9406 * RIP relative addressing.
9407 * @param pGCPtrEff Where to return the effective address.
9408 */
9409IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9410{
9411 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9412 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9413#define SET_SS_DEF() \
9414 do \
9415 { \
9416 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9417 pIemCpu->iEffSeg = X86_SREG_SS; \
9418 } while (0)
9419
9420 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9421 {
9422/** @todo Check the effective address size crap! */
9423 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9424 {
9425 uint16_t u16EffAddr;
9426
9427 /* Handle the disp16 form with no registers first. */
9428 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9429 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9430 else
9431 {
9432 /* Get the displacement. */
9433 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9434 {
9435 case 0: u16EffAddr = 0; break;
9436 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9437 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9438 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9439 }
9440
9441 /* Add the base and index registers to the disp. */
9442 switch (bRm & X86_MODRM_RM_MASK)
9443 {
9444 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9445 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9446 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9447 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9448 case 4: u16EffAddr += pCtx->si; break;
9449 case 5: u16EffAddr += pCtx->di; break;
9450 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9451 case 7: u16EffAddr += pCtx->bx; break;
9452 }
9453 }
9454
9455 *pGCPtrEff = u16EffAddr;
9456 }
9457 else
9458 {
9459 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9460 uint32_t u32EffAddr;
9461
9462 /* Handle the disp32 form with no registers first. */
9463 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9464 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9465 else
9466 {
9467 /* Get the register (or SIB) value. */
9468 switch ((bRm & X86_MODRM_RM_MASK))
9469 {
9470 case 0: u32EffAddr = pCtx->eax; break;
9471 case 1: u32EffAddr = pCtx->ecx; break;
9472 case 2: u32EffAddr = pCtx->edx; break;
9473 case 3: u32EffAddr = pCtx->ebx; break;
9474 case 4: /* SIB */
9475 {
9476 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9477
9478 /* Get the index and scale it. */
9479 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9480 {
9481 case 0: u32EffAddr = pCtx->eax; break;
9482 case 1: u32EffAddr = pCtx->ecx; break;
9483 case 2: u32EffAddr = pCtx->edx; break;
9484 case 3: u32EffAddr = pCtx->ebx; break;
9485 case 4: u32EffAddr = 0; /*none */ break;
9486 case 5: u32EffAddr = pCtx->ebp; break;
9487 case 6: u32EffAddr = pCtx->esi; break;
9488 case 7: u32EffAddr = pCtx->edi; break;
9489 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9490 }
9491 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9492
9493 /* add base */
9494 switch (bSib & X86_SIB_BASE_MASK)
9495 {
9496 case 0: u32EffAddr += pCtx->eax; break;
9497 case 1: u32EffAddr += pCtx->ecx; break;
9498 case 2: u32EffAddr += pCtx->edx; break;
9499 case 3: u32EffAddr += pCtx->ebx; break;
9500 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9501 case 5:
9502 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9503 {
9504 u32EffAddr += pCtx->ebp;
9505 SET_SS_DEF();
9506 }
9507 else
9508 {
9509 uint32_t u32Disp;
9510 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9511 u32EffAddr += u32Disp;
9512 }
9513 break;
9514 case 6: u32EffAddr += pCtx->esi; break;
9515 case 7: u32EffAddr += pCtx->edi; break;
9516 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9517 }
9518 break;
9519 }
9520 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9521 case 6: u32EffAddr = pCtx->esi; break;
9522 case 7: u32EffAddr = pCtx->edi; break;
9523 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9524 }
9525
9526 /* Get and add the displacement. */
9527 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9528 {
9529 case 0:
9530 break;
9531 case 1:
9532 {
9533 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9534 u32EffAddr += i8Disp;
9535 break;
9536 }
9537 case 2:
9538 {
9539 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9540 u32EffAddr += u32Disp;
9541 break;
9542 }
9543 default:
9544 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9545 }
9546
9547 }
9548 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9549 *pGCPtrEff = u32EffAddr;
9550 else
9551 {
9552 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9553 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9554 }
9555 }
9556 }
9557 else
9558 {
9559 uint64_t u64EffAddr;
9560
9561 /* Handle the rip+disp32 form with no registers first. */
9562 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9563 {
9564 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9565 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9566 }
9567 else
9568 {
9569 /* Get the register (or SIB) value. */
9570 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9571 {
9572 case 0: u64EffAddr = pCtx->rax; break;
9573 case 1: u64EffAddr = pCtx->rcx; break;
9574 case 2: u64EffAddr = pCtx->rdx; break;
9575 case 3: u64EffAddr = pCtx->rbx; break;
9576 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9577 case 6: u64EffAddr = pCtx->rsi; break;
9578 case 7: u64EffAddr = pCtx->rdi; break;
9579 case 8: u64EffAddr = pCtx->r8; break;
9580 case 9: u64EffAddr = pCtx->r9; break;
9581 case 10: u64EffAddr = pCtx->r10; break;
9582 case 11: u64EffAddr = pCtx->r11; break;
9583 case 13: u64EffAddr = pCtx->r13; break;
9584 case 14: u64EffAddr = pCtx->r14; break;
9585 case 15: u64EffAddr = pCtx->r15; break;
9586 /* SIB */
9587 case 4:
9588 case 12:
9589 {
9590 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9591
9592 /* Get the index and scale it. */
9593 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9594 {
9595 case 0: u64EffAddr = pCtx->rax; break;
9596 case 1: u64EffAddr = pCtx->rcx; break;
9597 case 2: u64EffAddr = pCtx->rdx; break;
9598 case 3: u64EffAddr = pCtx->rbx; break;
9599 case 4: u64EffAddr = 0; /*none */ break;
9600 case 5: u64EffAddr = pCtx->rbp; break;
9601 case 6: u64EffAddr = pCtx->rsi; break;
9602 case 7: u64EffAddr = pCtx->rdi; break;
9603 case 8: u64EffAddr = pCtx->r8; break;
9604 case 9: u64EffAddr = pCtx->r9; break;
9605 case 10: u64EffAddr = pCtx->r10; break;
9606 case 11: u64EffAddr = pCtx->r11; break;
9607 case 12: u64EffAddr = pCtx->r12; break;
9608 case 13: u64EffAddr = pCtx->r13; break;
9609 case 14: u64EffAddr = pCtx->r14; break;
9610 case 15: u64EffAddr = pCtx->r15; break;
9611 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9612 }
9613 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9614
9615 /* add base */
9616 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9617 {
9618 case 0: u64EffAddr += pCtx->rax; break;
9619 case 1: u64EffAddr += pCtx->rcx; break;
9620 case 2: u64EffAddr += pCtx->rdx; break;
9621 case 3: u64EffAddr += pCtx->rbx; break;
9622 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9623 case 6: u64EffAddr += pCtx->rsi; break;
9624 case 7: u64EffAddr += pCtx->rdi; break;
9625 case 8: u64EffAddr += pCtx->r8; break;
9626 case 9: u64EffAddr += pCtx->r9; break;
9627 case 10: u64EffAddr += pCtx->r10; break;
9628 case 11: u64EffAddr += pCtx->r11; break;
9629 case 12: u64EffAddr += pCtx->r12; break;
9630 case 14: u64EffAddr += pCtx->r14; break;
9631 case 15: u64EffAddr += pCtx->r15; break;
9632 /* complicated encodings */
9633 case 5:
9634 case 13:
9635 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9636 {
9637 if (!pIemCpu->uRexB)
9638 {
9639 u64EffAddr += pCtx->rbp;
9640 SET_SS_DEF();
9641 }
9642 else
9643 u64EffAddr += pCtx->r13;
9644 }
9645 else
9646 {
9647 uint32_t u32Disp;
9648 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9649 u64EffAddr += (int32_t)u32Disp;
9650 }
9651 break;
9652 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9653 }
9654 break;
9655 }
9656 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9657 }
9658
9659 /* Get and add the displacement. */
9660 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9661 {
9662 case 0:
9663 break;
9664 case 1:
9665 {
9666 int8_t i8Disp;
9667 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9668 u64EffAddr += i8Disp;
9669 break;
9670 }
9671 case 2:
9672 {
9673 uint32_t u32Disp;
9674 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9675 u64EffAddr += (int32_t)u32Disp;
9676 break;
9677 }
9678 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9679 }
9680
9681 }
9682
9683 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9684 *pGCPtrEff = u64EffAddr;
9685 else
9686 {
9687 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9688 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9689 }
9690 }
9691
9692 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9693 return VINF_SUCCESS;
9694}
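
/*
 * Worked examples for iemOpHlpCalcRmEffAddr (derived from the code above):
 *  - 16-bit addressing, bRm=0x42 with disp8=0x10: mod=1, rm=2, so the result
 *    is bp + si + 0x10 and SS becomes the default segment (SET_SS_DEF).
 *  - 32-bit addressing, bRm=0x05: mod=0, rm=5, so the address is just the
 *    disp32 that follows and the default segment is left alone.
 *  - 64-bit mode, bRm=0x05: RIP relative, i.e. disp32 added to the RIP of the
 *    next instruction, which is why cbImm must be passed in.
 */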
9695
9696/** @} */
9697
9698
9699
9700/*
9701 * Include the instructions
9702 */
9703#include "IEMAllInstructions.cpp.h"
9704
9705
9706
9707
9708#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9709
9710/**
9711 * Sets up execution verification mode.
9712 */
9713IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9714{
9715 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9716 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9717
9718 /*
9719 * Always note down the address of the current instruction.
9720 */
9721 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9722 pIemCpu->uOldRip = pOrgCtx->rip;
9723
9724 /*
9725 * Enable verification and/or logging.
9726 */
9727 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9728 if ( fNewNoRem
9729 && ( 0
9730#if 0 /* auto enable on first paged protected mode interrupt */
9731 || ( pOrgCtx->eflags.Bits.u1IF
9732 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9733 && TRPMHasTrap(pVCpu)
9734 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9735#endif
9736#if 0
9737 || ( pOrgCtx->cs.Sel == 0x10
9738 && ( pOrgCtx->rip == 0x90119e3e
9739 || pOrgCtx->rip == 0x901d9810))
9740#endif
9741#if 0 /* Auto enable DSL - FPU stuff. */
9742 || ( pOrgCtx->cs.Sel == 0x10
9743 && (// pOrgCtx->rip == 0xc02ec07f
9744 //|| pOrgCtx->rip == 0xc02ec082
9745 //|| pOrgCtx->rip == 0xc02ec0c9
9746 0
9747 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9748#endif
9749#if 0 /* Auto enable DSL - fstp st0 stuff. */
9750 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9751#endif
9752#if 0
9753 || pOrgCtx->rip == 0x9022bb3a
9754#endif
9755#if 0
9756 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9757#endif
9758#if 0
9759 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9760 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9761#endif
9762#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9763 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9764 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9765 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9766#endif
9767#if 0 /* NT4SP1 - xadd early boot. */
9768 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9769#endif
9770#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9771 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9772#endif
9773#if 0 /* NT4SP1 - cmpxchg (AMD). */
9774 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9775#endif
9776#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9777 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9778#endif
9779#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9780 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9781
9782#endif
9783#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9784 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9785
9786#endif
9787#if 0 /* NT4SP1 - frstor [ecx] */
9788 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9789#endif
9790#if 0 /* xxxxxx - All long mode code. */
9791 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9792#endif
9793#if 0 /* rep movsq linux 3.7 64-bit boot. */
9794 || (pOrgCtx->rip == 0x0000000000100241)
9795#endif
9796#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9797 || (pOrgCtx->rip == 0x000000000215e240)
9798#endif
9799#if 0 /* DOS's size-overridden iret to v8086. */
9800 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9801#endif
9802 )
9803 )
9804 {
9805 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9806 RTLogFlags(NULL, "enabled");
9807 fNewNoRem = false;
9808 }
9809 if (fNewNoRem != pIemCpu->fNoRem)
9810 {
9811 pIemCpu->fNoRem = fNewNoRem;
9812 if (!fNewNoRem)
9813 {
9814 LogAlways(("Enabling verification mode!\n"));
9815 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9816 }
9817 else
9818 LogAlways(("Disabling verification mode!\n"));
9819 }
9820
9821 /*
9822 * Switch state.
9823 */
9824 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9825 {
9826 static CPUMCTX s_DebugCtx; /* Ugly! */
9827
9828 s_DebugCtx = *pOrgCtx;
9829 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9830 }
9831
9832 /*
9833 * See if there is an interrupt pending in TRPM and inject it if we can.
9834 */
9835 pIemCpu->uInjectCpl = UINT8_MAX;
9836 if ( pOrgCtx->eflags.Bits.u1IF
9837 && TRPMHasTrap(pVCpu)
9838 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9839 {
9840 uint8_t u8TrapNo;
9841 TRPMEVENT enmType;
9842 RTGCUINT uErrCode;
9843 RTGCPTR uCr2;
9844 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9845 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9846 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9847 TRPMResetTrap(pVCpu);
9848 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9849 }
9850
9851 /*
9852 * Reset the counters.
9853 */
9854 pIemCpu->cIOReads = 0;
9855 pIemCpu->cIOWrites = 0;
9856 pIemCpu->fIgnoreRaxRdx = false;
9857 pIemCpu->fOverlappingMovs = false;
9858 pIemCpu->fProblematicMemory = false;
9859 pIemCpu->fUndefinedEFlags = 0;
9860
9861 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9862 {
9863 /*
9864 * Free all verification records.
9865 */
9866 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9867 pIemCpu->pIemEvtRecHead = NULL;
9868 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9869 do
9870 {
9871 while (pEvtRec)
9872 {
9873 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9874 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9875 pIemCpu->pFreeEvtRec = pEvtRec;
9876 pEvtRec = pNext;
9877 }
9878 pEvtRec = pIemCpu->pOtherEvtRecHead;
9879 pIemCpu->pOtherEvtRecHead = NULL;
9880 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9881 } while (pEvtRec);
9882 }
9883}
9884
9885
9886/**
9887 * Allocate an event record.
9888 * @returns Pointer to a record.
9889 */
9890IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9891{
9892 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9893 return NULL;
9894
9895 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9896 if (pEvtRec)
9897 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9898 else
9899 {
9900 if (!pIemCpu->ppIemEvtRecNext)
9901 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9902
9903 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9904 if (!pEvtRec)
9905 return NULL;
9906 }
9907 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9908 pEvtRec->pNext = NULL;
9909 return pEvtRec;
9910}
9911
9912
9913/**
9914 * IOMMMIORead notification.
9915 */
9916VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9917{
9918 PVMCPU pVCpu = VMMGetCpu(pVM);
9919 if (!pVCpu)
9920 return;
9921 PIEMCPU pIemCpu = &pVCpu->iem.s;
9922 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9923 if (!pEvtRec)
9924 return;
9925 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9926 pEvtRec->u.RamRead.GCPhys = GCPhys;
9927 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9928 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9929 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9930}
9931
9932
9933/**
9934 * IOMMMIOWrite notification.
9935 */
9936VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9937{
9938 PVMCPU pVCpu = VMMGetCpu(pVM);
9939 if (!pVCpu)
9940 return;
9941 PIEMCPU pIemCpu = &pVCpu->iem.s;
9942 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9943 if (!pEvtRec)
9944 return;
9945 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9946 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9947 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9948 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9949 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9950 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9951 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9952 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9953 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9954}
9955
9956
9957/**
9958 * IOMIOPortRead notification.
9959 */
9960VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9961{
9962 PVMCPU pVCpu = VMMGetCpu(pVM);
9963 if (!pVCpu)
9964 return;
9965 PIEMCPU pIemCpu = &pVCpu->iem.s;
9966 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9967 if (!pEvtRec)
9968 return;
9969 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9970 pEvtRec->u.IOPortRead.Port = Port;
9971 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9972 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9973 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9974}
9975
9976/**
9977 * IOMIOPortWrite notification.
9978 */
9979VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9980{
9981 PVMCPU pVCpu = VMMGetCpu(pVM);
9982 if (!pVCpu)
9983 return;
9984 PIEMCPU pIemCpu = &pVCpu->iem.s;
9985 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9986 if (!pEvtRec)
9987 return;
9988 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9989 pEvtRec->u.IOPortWrite.Port = Port;
9990 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9991 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9992 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9993 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9994}
9995
9996
9997VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
9998{
9999 AssertFailed();
10000}
10001
10002
10003VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
10004{
10005 AssertFailed();
10006}
10007
10008
10009/**
10010 * Fakes and records an I/O port read.
10011 *
10012 * @returns VINF_SUCCESS.
10013 * @param pIemCpu The IEM per CPU data.
10014 * @param Port The I/O port.
10015 * @param pu32Value Where to store the fake value.
10016 * @param cbValue The size of the access.
10017 */
10018IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10019{
10020 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10021 if (pEvtRec)
10022 {
10023 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10024 pEvtRec->u.IOPortRead.Port = Port;
10025 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
10026 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10027 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10028 }
10029 pIemCpu->cIOReads++;
10030 *pu32Value = 0xcccccccc;
10031 return VINF_SUCCESS;
10032}
10033
10034
10035/**
10036 * Fakes and records an I/O port write.
10037 *
10038 * @returns VINF_SUCCESS.
10039 * @param pIemCpu The IEM per CPU data.
10040 * @param Port The I/O port.
10041 * @param u32Value The value being written.
10042 * @param cbValue The size of the access.
10043 */
10044IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10045{
10046 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10047 if (pEvtRec)
10048 {
10049 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10050 pEvtRec->u.IOPortWrite.Port = Port;
10051 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10052 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10053 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10054 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10055 }
10056 pIemCpu->cIOWrites++;
10057 return VINF_SUCCESS;
10058}
10059
10060
10061/**
10062 * Used to add extra details about a stub case.
10063 * @param pIemCpu The IEM per CPU state.
10064 */
10065IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10066{
10067 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10068 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10069 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10070 char szRegs[4096];
10071 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10072 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10073 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10074 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10075 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10076 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10077 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10078 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10079 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10080 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10081 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10082 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10083 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10084 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10085 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10086 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10087 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10088 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10089 " efer=%016VR{efer}\n"
10090 " pat=%016VR{pat}\n"
10091 " sf_mask=%016VR{sf_mask}\n"
10092 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10093 " lstar=%016VR{lstar}\n"
10094 " star=%016VR{star} cstar=%016VR{cstar}\n"
10095 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10096 );
10097
10098 char szInstr1[256];
10099 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10100 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10101 szInstr1, sizeof(szInstr1), NULL);
10102 char szInstr2[256];
10103 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10104 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10105 szInstr2, sizeof(szInstr2), NULL);
10106
10107 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10108}
10109
10110
10111/**
10112 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10113 * dump to the assertion info.
10114 *
10115 * @param pEvtRec The record to dump.
10116 */
10117IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10118{
10119 switch (pEvtRec->enmEvent)
10120 {
10121 case IEMVERIFYEVENT_IOPORT_READ:
10122 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10123 pEvtRec->u.IOPortRead.Port,
10124 pEvtRec->u.IOPortRead.cbValue);
10125 break;
10126 case IEMVERIFYEVENT_IOPORT_WRITE:
10127 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10128 pEvtRec->u.IOPortWrite.Port,
10129 pEvtRec->u.IOPortWrite.cbValue,
10130 pEvtRec->u.IOPortWrite.u32Value);
10131 break;
10132 case IEMVERIFYEVENT_RAM_READ:
10133 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10134 pEvtRec->u.RamRead.GCPhys,
10135 pEvtRec->u.RamRead.cb);
10136 break;
10137 case IEMVERIFYEVENT_RAM_WRITE:
10138 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10139 pEvtRec->u.RamWrite.GCPhys,
10140 pEvtRec->u.RamWrite.cb,
10141 (int)pEvtRec->u.RamWrite.cb,
10142 pEvtRec->u.RamWrite.ab);
10143 break;
10144 default:
10145 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10146 break;
10147 }
10148}
10149
10150
10151/**
10152 * Raises an assertion on the specified records, showing the given message with
10153 * dumps of both records attached.
10154 *
10155 * @param pIemCpu The IEM per CPU data.
10156 * @param pEvtRec1 The first record.
10157 * @param pEvtRec2 The second record.
10158 * @param pszMsg The message explaining why we're asserting.
10159 */
10160IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10161{
10162 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10163 iemVerifyAssertAddRecordDump(pEvtRec1);
10164 iemVerifyAssertAddRecordDump(pEvtRec2);
10165 iemVerifyAssertMsg2(pIemCpu);
10166 RTAssertPanic();
10167}
10168
10169
10170/**
10171 * Raises an assertion on the specified record, showing the given message with
10172 * a record dump attached.
10173 *
10174 * @param pIemCpu The IEM per CPU data.
10175 * @param pEvtRec The record to dump.
10176 * @param pszMsg The message explaining why we're asserting.
10177 */
10178IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10179{
10180 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10181 iemVerifyAssertAddRecordDump(pEvtRec);
10182 iemVerifyAssertMsg2(pIemCpu);
10183 RTAssertPanic();
10184}
10185
10186
10187/**
10188 * Verifies a write record.
10189 *
10190 * @param pIemCpu The IEM per CPU data.
10191 * @param pEvtRec The write record.
10192 * @param fRem Set if REM was doing the other execution. If clear,
10193 * it was HM.
10194 */
10195IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10196{
10197 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10198 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10199 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10200 if ( RT_FAILURE(rc)
10201 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10202 {
10203 /* fend off ins */
10204 if ( !pIemCpu->cIOReads
10205 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10206 || ( pEvtRec->u.RamWrite.cb != 1
10207 && pEvtRec->u.RamWrite.cb != 2
10208 && pEvtRec->u.RamWrite.cb != 4) )
10209 {
10210 /* fend off ROMs and MMIO */
10211 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10212 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10213 {
10214 /* fend off fxsave */
10215 if (pEvtRec->u.RamWrite.cb != 512)
10216 {
10217 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10218 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10219 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10220 RTAssertMsg2Add("%s: %.*Rhxs\n"
10221 "iem: %.*Rhxs\n",
10222 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10223 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10224 iemVerifyAssertAddRecordDump(pEvtRec);
10225 iemVerifyAssertMsg2(pIemCpu);
10226 RTAssertPanic();
10227 }
10228 }
10229 }
10230 }
10231
10232}
10233
10234/**
10235 * Performs the post-execution verification checks.
10236 */
10237IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10238{
10239 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10240 return;
10241
10242 /*
10243 * Switch back the state.
10244 */
10245 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10246 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10247 Assert(pOrgCtx != pDebugCtx);
10248 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10249
10250 /*
10251 * Execute the instruction in REM.
10252 */
10253 bool fRem = false;
10254 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10255 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10256 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10257#ifdef IEM_VERIFICATION_MODE_FULL_HM
10258 if ( HMIsEnabled(pVM)
10259 && pIemCpu->cIOReads == 0
10260 && pIemCpu->cIOWrites == 0
10261 && !pIemCpu->fProblematicMemory)
10262 {
10263 uint64_t uStartRip = pOrgCtx->rip;
10264 unsigned iLoops = 0;
10265 do
10266 {
10267 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10268 iLoops++;
10269 } while ( rc == VINF_SUCCESS
10270 || ( rc == VINF_EM_DBG_STEPPED
10271 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10272 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10273 || ( pOrgCtx->rip != pDebugCtx->rip
10274 && pIemCpu->uInjectCpl != UINT8_MAX
10275 && iLoops < 8) );
10276 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10277 rc = VINF_SUCCESS;
10278 }
10279#endif
10280 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10281 || rc == VINF_IOM_R3_IOPORT_READ
10282 || rc == VINF_IOM_R3_IOPORT_WRITE
10283 || rc == VINF_IOM_R3_MMIO_READ
10284 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10285 || rc == VINF_IOM_R3_MMIO_WRITE
10286 || rc == VINF_CPUM_R3_MSR_READ
10287 || rc == VINF_CPUM_R3_MSR_WRITE
10288 || rc == VINF_EM_RESCHEDULE
10289 )
10290 {
10291 EMRemLock(pVM);
10292 rc = REMR3EmulateInstruction(pVM, pVCpu);
10293 AssertRC(rc);
10294 EMRemUnlock(pVM);
10295 fRem = true;
10296 }
10297
10298 /*
10299 * Compare the register states.
10300 */
10301 unsigned cDiffs = 0;
10302 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10303 {
10304 //Log(("REM and IEM ends up with different registers!\n"));
10305 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10306
10307# define CHECK_FIELD(a_Field) \
10308 do \
10309 { \
10310 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10311 { \
10312 switch (sizeof(pOrgCtx->a_Field)) \
10313 { \
10314 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10315 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10316 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10317 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10318 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10319 } \
10320 cDiffs++; \
10321 } \
10322 } while (0)
10323# define CHECK_XSTATE_FIELD(a_Field) \
10324 do \
10325 { \
10326 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10327 { \
10328 switch (sizeof(pOrgCtx->a_Field)) \
10329 { \
10330 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10331 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10332 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10333 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10334 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10335 } \
10336 cDiffs++; \
10337 } \
10338 } while (0)
10339
10340# define CHECK_BIT_FIELD(a_Field) \
10341 do \
10342 { \
10343 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10344 { \
10345 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10346 cDiffs++; \
10347 } \
10348 } while (0)
10349
10350# define CHECK_SEL(a_Sel) \
10351 do \
10352 { \
10353 CHECK_FIELD(a_Sel.Sel); \
10354 CHECK_FIELD(a_Sel.Attr.u); \
10355 CHECK_FIELD(a_Sel.u64Base); \
10356 CHECK_FIELD(a_Sel.u32Limit); \
10357 CHECK_FIELD(a_Sel.fFlags); \
10358 } while (0)
10359
10360 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10361 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10362
10363#if 1 /* The recompiler doesn't update these the intel way. */
10364 if (fRem)
10365 {
10366 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10367 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10368 pOrgXState->x87.CS = pDebugXState->x87.CS;
10369 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10370 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10371 pOrgXState->x87.DS = pDebugXState->x87.DS;
10372 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10373 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10374 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10375 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10376 }
10377#endif
10378 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10379 {
10380 RTAssertMsg2Weak(" the FPU state differs\n");
10381 cDiffs++;
10382 CHECK_XSTATE_FIELD(x87.FCW);
10383 CHECK_XSTATE_FIELD(x87.FSW);
10384 CHECK_XSTATE_FIELD(x87.FTW);
10385 CHECK_XSTATE_FIELD(x87.FOP);
10386 CHECK_XSTATE_FIELD(x87.FPUIP);
10387 CHECK_XSTATE_FIELD(x87.CS);
10388 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10389 CHECK_XSTATE_FIELD(x87.FPUDP);
10390 CHECK_XSTATE_FIELD(x87.DS);
10391 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10392 CHECK_XSTATE_FIELD(x87.MXCSR);
10393 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10394 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10395 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10396 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10397 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10398 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10399 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10400 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10401 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10402 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10403 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10404 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10405 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10406 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10407 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10408 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10409 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10410 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10411 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10412 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10413 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10414 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10415 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10416 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10417 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10418 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10419 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10420 }
10421 CHECK_FIELD(rip);
10422 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10423 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10424 {
10425 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10426 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10427 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10428 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10429 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10430 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10431 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10432 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10433 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10434 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10435 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10436 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10437 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10438 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10439 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10440 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10441 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
10442 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10443 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10444 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10445 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10446 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10447 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10448 }
10449
10450 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10451 CHECK_FIELD(rax);
10452 CHECK_FIELD(rcx);
10453 if (!pIemCpu->fIgnoreRaxRdx)
10454 CHECK_FIELD(rdx);
10455 CHECK_FIELD(rbx);
10456 CHECK_FIELD(rsp);
10457 CHECK_FIELD(rbp);
10458 CHECK_FIELD(rsi);
10459 CHECK_FIELD(rdi);
10460 CHECK_FIELD(r8);
10461 CHECK_FIELD(r9);
10462 CHECK_FIELD(r10);
10463 CHECK_FIELD(r11);
10464 CHECK_FIELD(r12);
10465 CHECK_FIELD(r13);
10466 CHECK_SEL(cs);
10467 CHECK_SEL(ss);
10468 CHECK_SEL(ds);
10469 CHECK_SEL(es);
10470 CHECK_SEL(fs);
10471 CHECK_SEL(gs);
10472 CHECK_FIELD(cr0);
10473
10474 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10475 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10476 /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
10477 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10478 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10479 {
10480 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10481 { /* ignore */ }
10482 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10483 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10484 && fRem)
10485 { /* ignore */ }
10486 else
10487 CHECK_FIELD(cr2);
10488 }
10489 CHECK_FIELD(cr3);
10490 CHECK_FIELD(cr4);
10491 CHECK_FIELD(dr[0]);
10492 CHECK_FIELD(dr[1]);
10493 CHECK_FIELD(dr[2]);
10494 CHECK_FIELD(dr[3]);
10495 CHECK_FIELD(dr[6]);
10496 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10497 CHECK_FIELD(dr[7]);
10498 CHECK_FIELD(gdtr.cbGdt);
10499 CHECK_FIELD(gdtr.pGdt);
10500 CHECK_FIELD(idtr.cbIdt);
10501 CHECK_FIELD(idtr.pIdt);
10502 CHECK_SEL(ldtr);
10503 CHECK_SEL(tr);
10504 CHECK_FIELD(SysEnter.cs);
10505 CHECK_FIELD(SysEnter.eip);
10506 CHECK_FIELD(SysEnter.esp);
10507 CHECK_FIELD(msrEFER);
10508 CHECK_FIELD(msrSTAR);
10509 CHECK_FIELD(msrPAT);
10510 CHECK_FIELD(msrLSTAR);
10511 CHECK_FIELD(msrCSTAR);
10512 CHECK_FIELD(msrSFMASK);
10513 CHECK_FIELD(msrKERNELGSBASE);
10514
10515 if (cDiffs != 0)
10516 {
10517 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10518 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10519 iemVerifyAssertMsg2(pIemCpu);
10520 RTAssertPanic();
10521 }
10522# undef CHECK_FIELD
10523# undef CHECK_BIT_FIELD
10524 }
10525
10526 /*
10527 * If the register state compared fine, check the verification event
10528 * records.
10529 */
10530 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10531 {
10532 /*
10533 * Compare verification event records.
10534 * - I/O port accesses should be a 1:1 match.
10535 */
10536 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10537 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10538 while (pIemRec && pOtherRec)
10539 {
10540 /* Since we might miss RAM writes and reads, ignore reads and verify
10541 that any extra IEM write records still match what is actually in memory. */
10542 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10543 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10544 && pIemRec->pNext)
10545 {
10546 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10547 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10548 pIemRec = pIemRec->pNext;
10549 }
10550
10551 /* Do the compare. */
10552 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10553 {
10554 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10555 break;
10556 }
10557 bool fEquals;
10558 switch (pIemRec->enmEvent)
10559 {
10560 case IEMVERIFYEVENT_IOPORT_READ:
10561 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10562 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10563 break;
10564 case IEMVERIFYEVENT_IOPORT_WRITE:
10565 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10566 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10567 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10568 break;
10569 case IEMVERIFYEVENT_RAM_READ:
10570 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10571 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10572 break;
10573 case IEMVERIFYEVENT_RAM_WRITE:
10574 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10575 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10576 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10577 break;
10578 default:
10579 fEquals = false;
10580 break;
10581 }
10582 if (!fEquals)
10583 {
10584 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10585 break;
10586 }
10587
10588 /* advance */
10589 pIemRec = pIemRec->pNext;
10590 pOtherRec = pOtherRec->pNext;
10591 }
10592
10593 /* Ignore extra writes and reads. */
10594 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10595 {
10596 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10597 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10598 pIemRec = pIemRec->pNext;
10599 }
10600 if (pIemRec != NULL)
10601 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10602 else if (pOtherRec != NULL)
10603 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10604 }
10605 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10606}
10607
10608#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10609
10610/* stubs */
10611IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10612{
10613 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10614 return VERR_INTERNAL_ERROR;
10615}
10616
10617IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10618{
10619 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10620 return VERR_INTERNAL_ERROR;
10621}
10622
10623#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10624
10625
10626#ifdef LOG_ENABLED
10627/**
10628 * Logs the current instruction.
10629 * @param pVCpu The cross context virtual CPU structure of the caller.
10630 * @param pCtx The current CPU context.
10631 * @param fSameCtx Set if we have the same context information as the VMM,
10632 * clear if we may have already executed an instruction in
10633 * our debug context. When clear, we assume IEMCPU holds
10634 * valid CPU mode info.
10635 */
10636IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10637{
10638# ifdef IN_RING3
10639 if (LogIs2Enabled())
10640 {
10641 char szInstr[256];
10642 uint32_t cbInstr = 0;
10643 if (fSameCtx)
10644 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10645 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10646 szInstr, sizeof(szInstr), &cbInstr);
10647 else
10648 {
10649 uint32_t fFlags = 0;
10650 switch (pVCpu->iem.s.enmCpuMode)
10651 {
10652 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10653 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10654 case IEMMODE_16BIT:
10655 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10656 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10657 else
10658 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10659 break;
10660 }
10661 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10662 szInstr, sizeof(szInstr), &cbInstr);
10663 }
10664
10665 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10666 Log2(("****\n"
10667 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10668 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10669 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10670 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10671 " %s\n"
10672 ,
10673 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10674 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10675 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10676 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10677 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10678 szInstr));
10679
10680 if (LogIs3Enabled())
10681 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10682 }
10683 else
10684# endif
10685 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10686 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10687}
10688#endif
10689
10690
10691/**
10692 * Makes status code adjustments (pass up from I/O and access handlers)
10693 * as well as maintaining statistics.
10694 *
10695 * @returns Strict VBox status code to pass up.
10696 * @param pIemCpu The IEM per CPU data.
10697 * @param rcStrict The status from executing an instruction.
10698 */
10699DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10700{
10701 if (rcStrict != VINF_SUCCESS)
10702 {
10703 if (RT_SUCCESS(rcStrict))
10704 {
10705 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10706 || rcStrict == VINF_IOM_R3_IOPORT_READ
10707 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10708 || rcStrict == VINF_IOM_R3_MMIO_READ
10709 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10710 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10711 || rcStrict == VINF_CPUM_R3_MSR_READ
10712 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10713 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10714 /* raw-mode / virt handlers only: */
10715 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10716 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10717 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10718 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10719 || rcStrict == VINF_SELM_SYNC_GDT
10720 || rcStrict == VINF_CSAM_PENDING_ACTION
10721 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10722 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10723/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10724 int32_t const rcPassUp = pIemCpu->rcPassUp;
10725 if (rcPassUp == VINF_SUCCESS)
10726 pIemCpu->cRetInfStatuses++;
10727 else if ( rcPassUp < VINF_EM_FIRST
10728 || rcPassUp > VINF_EM_LAST
10729 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10730 {
10731 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10732 pIemCpu->cRetPassUpStatus++;
10733 rcStrict = rcPassUp;
10734 }
10735 else
10736 {
10737 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10738 pIemCpu->cRetInfStatuses++;
10739 }
10740 }
10741 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10742 pIemCpu->cRetAspectNotImplemented++;
10743 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10744 pIemCpu->cRetInstrNotImplemented++;
10745#ifdef IEM_VERIFICATION_MODE_FULL
10746 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10747 rcStrict = VINF_SUCCESS;
10748#endif
10749 else
10750 pIemCpu->cRetErrStatuses++;
10751 }
10752 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10753 {
10754 pIemCpu->cRetPassUpStatus++;
10755 rcStrict = pIemCpu->rcPassUp;
10756 }
10757
10758 return rcStrict;
10759}
10760
10761
10762/**
10763 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10764 * IEMExecOneWithPrefetchedByPC.
10765 *
10766 * @return Strict VBox status code.
10767 * @param pVCpu The current virtual CPU.
10768 * @param pIemCpu The IEM per CPU data.
10769 * @param fExecuteInhibit If set, execute the instruction following CLI,
10770 * POP SS and MOV SS,GR.
10771 */
10772DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10773{
10774 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10775 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10776 if (rcStrict == VINF_SUCCESS)
10777 pIemCpu->cInstructions++;
10778 if (pIemCpu->cActiveMappings > 0)
10779 iemMemRollback(pIemCpu);
10780//#ifdef DEBUG
10781// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10782//#endif
10783
10784 /* Execute the next instruction as well if a cli, pop ss or
10785 mov ss, Gr has just completed successfully. */
10786 if ( fExecuteInhibit
10787 && rcStrict == VINF_SUCCESS
10788 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10789 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10790 {
10791 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10792 if (rcStrict == VINF_SUCCESS)
10793 {
10794# ifdef LOG_ENABLED
10795 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10796# endif
10797 IEM_OPCODE_GET_NEXT_U8(&b);
10798 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10799 if (rcStrict == VINF_SUCCESS)
10800 pIemCpu->cInstructions++;
10801 if (pIemCpu->cActiveMappings > 0)
10802 iemMemRollback(pIemCpu);
10803 }
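        /* Note: the magic value below cannot match any guest RIP, so this effectively
           clears the single-instruction interrupt inhibition set by cli/pop ss/mov ss. */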
10804 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10805 }
10806
10807 /*
10808 * Return value fiddling, statistics and sanity assertions.
10809 */
10810 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10811
10812 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10813 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10814#if defined(IEM_VERIFICATION_MODE_FULL)
10815 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10816 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10817 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10818 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10819#endif
10820 return rcStrict;
10821}
10822
10823
10824#ifdef IN_RC
10825/**
10826 * Re-enters raw-mode or ensures we return to ring-3.
10827 *
10828 * @returns rcStrict, maybe modified.
10829 * @param pIemCpu The IEM CPU structure.
10830 * @param pVCpu The cross context virtual CPU structure of the caller.
10831 * @param pCtx The current CPU context.
10832 * @param rcStrict The status code returned by the interpreter.
10833 */
10834DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10835{
10836 if (!pIemCpu->fInPatchCode)
10837 CPUMRawEnter(pVCpu);
10838 return rcStrict;
10839}
10840#endif
10841
10842
10843/**
10844 * Execute one instruction.
10845 *
10846 * @return Strict VBox status code.
10847 * @param pVCpu The current virtual CPU.
10848 */
10849VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10850{
10851 PIEMCPU pIemCpu = &pVCpu->iem.s;
10852
10853#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10854 iemExecVerificationModeSetup(pIemCpu);
10855#endif
10856#ifdef LOG_ENABLED
10857 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10858 iemLogCurInstr(pVCpu, pCtx, true);
10859#endif
10860
10861 /*
10862 * Do the decoding and emulation.
10863 */
10864 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10865 if (rcStrict == VINF_SUCCESS)
10866 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10867
10868#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10869 /*
10870 * Assert some sanity.
10871 */
10872 iemExecVerificationModeCheck(pIemCpu);
10873#endif
10874#ifdef IN_RC
10875 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10876#endif
10877 if (rcStrict != VINF_SUCCESS)
10878 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10879 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10880 return rcStrict;
10881}
10882
10883
10884VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10885{
10886 PIEMCPU pIemCpu = &pVCpu->iem.s;
10887 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10888 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10889
10890 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10891 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10892 if (rcStrict == VINF_SUCCESS)
10893 {
10894 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10895 if (pcbWritten)
10896 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10897 }
10898
10899#ifdef IN_RC
10900 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10901#endif
10902 return rcStrict;
10903}
10904
10905
10906VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10907 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10908{
10909 PIEMCPU pIemCpu = &pVCpu->iem.s;
10910 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10911 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10912
10913 VBOXSTRICTRC rcStrict;
10914 if ( cbOpcodeBytes
10915 && pCtx->rip == OpcodeBytesPC)
10916 {
10917 iemInitDecoder(pIemCpu, false);
10918 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10919 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10920 rcStrict = VINF_SUCCESS;
10921 }
10922 else
10923 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10924 if (rcStrict == VINF_SUCCESS)
10925 {
10926 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10927 }
10928
10929#ifdef IN_RC
10930 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10931#endif
10932 return rcStrict;
10933}
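/* Usage sketch (illustrative only, names below are not from this file): a caller that
 * has already fetched the opcode bytes at the current RIP can skip the prefetch:
 *
 *     uint8_t      abInstr[16];   // filled from guest memory at GCPtrRip by the caller
 *     size_t       cbInstrBytes;  // number of valid bytes in abInstr
 *     VBOXSTRICTRC rcStrict2 = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx),
 *                                                           GCPtrRip, abInstr, cbInstrBytes);
 *
 * When GCPtrRip no longer equals pCtx->rip, the function above simply falls back to
 * the normal iemInitDecoderAndPrefetchOpcodes path.
 */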
10934
10935
10936VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10937{
10938 PIEMCPU pIemCpu = &pVCpu->iem.s;
10939 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10940 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10941
10942 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10943 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10944 if (rcStrict == VINF_SUCCESS)
10945 {
10946 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10947 if (pcbWritten)
10948 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10949 }
10950
10951#ifdef IN_RC
10952 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10953#endif
10954 return rcStrict;
10955}
10956
10957
10958VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10959 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10960{
10961 PIEMCPU pIemCpu = &pVCpu->iem.s;
10962 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10963 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10964
10965 VBOXSTRICTRC rcStrict;
10966 if ( cbOpcodeBytes
10967 && pCtx->rip == OpcodeBytesPC)
10968 {
10969 iemInitDecoder(pIemCpu, true);
10970 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10971 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10972 rcStrict = VINF_SUCCESS;
10973 }
10974 else
10975 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10976 if (rcStrict == VINF_SUCCESS)
10977 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10978
10979#ifdef IN_RC
10980 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10981#endif
10982 return rcStrict;
10983}
10984
10985
10986VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
10987{
10988 PIEMCPU pIemCpu = &pVCpu->iem.s;
10989
10990 /*
10991 * See if there is an interrupt pending in TRPM and inject it if we can.
10992 */
10993#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
10994 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10995# ifdef IEM_VERIFICATION_MODE_FULL
10996 pIemCpu->uInjectCpl = UINT8_MAX;
10997# endif
10998 if ( pCtx->eflags.Bits.u1IF
10999 && TRPMHasTrap(pVCpu)
11000 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11001 {
11002 uint8_t u8TrapNo;
11003 TRPMEVENT enmType;
11004 RTGCUINT uErrCode;
11005 RTGCPTR uCr2;
11006 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11007 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11008 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11009 TRPMResetTrap(pVCpu);
11010 }
11011#else
11012 iemExecVerificationModeSetup(pIemCpu);
11013 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11014#endif
11015
11016 /*
11017 * Log the state.
11018 */
11019#ifdef LOG_ENABLED
11020 iemLogCurInstr(pVCpu, pCtx, true);
11021#endif
11022
11023 /*
11024 * Do the decoding and emulation.
11025 */
11026 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11027 if (rcStrict == VINF_SUCCESS)
11028 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11029
11030#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11031 /*
11032 * Assert some sanity.
11033 */
11034 iemExecVerificationModeCheck(pIemCpu);
11035#endif
11036
11037 /*
11038 * Maybe re-enter raw-mode and log.
11039 */
11040#ifdef IN_RC
11041 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11042#endif
11043 if (rcStrict != VINF_SUCCESS)
11044 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11045 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11046 return rcStrict;
11047}
11048
11049
11050
11051/**
11052 * Injects a trap, fault, abort, software interrupt or external interrupt.
11053 *
11054 * The parameter list matches TRPMQueryTrapAll pretty closely.
11055 *
11056 * @returns Strict VBox status code.
11057 * @param pVCpu The current virtual CPU.
11058 * @param u8TrapNo The trap number.
11059 * @param enmType The event type: trap/fault/abort, software
11060 * interrupt or external hardware interrupt.
11061 * @param uErrCode The error code if applicable.
11062 * @param uCr2 The CR2 value if applicable.
11063 * @param cbInstr The instruction length (only relevant for
11064 * software interrupts).
11065 */
11066VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11067 uint8_t cbInstr)
11068{
11069 iemInitDecoder(&pVCpu->iem.s, false);
11070#ifdef DBGFTRACE_ENABLED
11071 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11072 u8TrapNo, enmType, uErrCode, uCr2);
11073#endif
11074
11075 uint32_t fFlags;
11076 switch (enmType)
11077 {
11078 case TRPM_HARDWARE_INT:
11079 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11080 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11081 uErrCode = uCr2 = 0;
11082 break;
11083
11084 case TRPM_SOFTWARE_INT:
11085 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11086 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11087 uErrCode = uCr2 = 0;
11088 break;
11089
11090 case TRPM_TRAP:
11091 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11092 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11093 if (u8TrapNo == X86_XCPT_PF)
11094 fFlags |= IEM_XCPT_FLAGS_CR2;
11095 switch (u8TrapNo)
11096 {
11097 case X86_XCPT_DF:
11098 case X86_XCPT_TS:
11099 case X86_XCPT_NP:
11100 case X86_XCPT_SS:
11101 case X86_XCPT_PF:
11102 case X86_XCPT_AC:
11103 fFlags |= IEM_XCPT_FLAGS_ERR;
11104 break;
11105
11106 case X86_XCPT_NMI:
11107 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11108 break;
11109 }
11110 break;
11111
11112 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11113 }
11114
11115 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11116}
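/* Example (illustrative only): injecting a guest page fault, with uErrCode and
 * GCPtrFault supplied by the caller, would look like
 *
 *     rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0);
 *
 * which takes the TRPM_TRAP branch above and adds the error code and CR2 flags.
 */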
11117
11118
11119/**
11120 * Injects the active TRPM event.
11121 *
11122 * @returns Strict VBox status code.
11123 * @param pVCpu Pointer to the VMCPU.
11124 */
11125VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11126{
11127#ifndef IEM_IMPLEMENTS_TASKSWITCH
11128 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11129#else
11130 uint8_t u8TrapNo;
11131 TRPMEVENT enmType;
11132 RTGCUINT uErrCode;
11133 RTGCUINTPTR uCr2;
11134 uint8_t cbInstr;
11135 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11136 if (RT_FAILURE(rc))
11137 return rc;
11138
11139 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11140
11141 /** @todo Are there any other codes that imply the event was successfully
11142 * delivered to the guest? See @bugref{6607}. */
11143 if ( rcStrict == VINF_SUCCESS
11144 || rcStrict == VINF_IEM_RAISED_XCPT)
11145 {
11146 TRPMResetTrap(pVCpu);
11147 }
11148 return rcStrict;
11149#endif
11150}
11151
11152
11153VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11154{
11155 return VERR_NOT_IMPLEMENTED;
11156}
11157
11158
11159VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11160{
11161 return VERR_NOT_IMPLEMENTED;
11162}
11163
11164
11165#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11166/**
11167 * Executes an IRET instruction with default operand size.
11168 *
11169 * This is for PATM.
11170 *
11171 * @returns VBox status code.
11172 * @param pVCpu The current virtual CPU.
11173 * @param pCtxCore The register frame.
11174 */
11175VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11176{
11177 PIEMCPU pIemCpu = &pVCpu->iem.s;
11178 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11179
11180 iemCtxCoreToCtx(pCtx, pCtxCore);
11181 iemInitDecoder(pIemCpu);
11182 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11183 if (rcStrict == VINF_SUCCESS)
11184 iemCtxToCtxCore(pCtxCore, pCtx);
11185 else
11186 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11187 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11188 return rcStrict;
11189}
11190#endif
11191
11192
11193/**
11194 * Macro used by the IEMExec* methods to check the given instruction length.
11195 *
11196 * Will return on failure!
11197 *
11198 * @param a_cbInstr The given instruction length.
11199 * @param a_cbMin The minimum length.
11200 */
11201#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11202 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11203 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
11204
11205
11206/**
11207 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11208 *
11209 * This API ASSUMES that the caller has already verified that the guest code is
11210 * allowed to access the I/O port. (The I/O port is in the DX register in the
11211 * guest state.)
11212 *
11213 * @returns Strict VBox status code.
11214 * @param pVCpu The cross context per virtual CPU structure.
11215 * @param cbValue The size of the I/O port access (1, 2, or 4).
11216 * @param enmAddrMode The addressing mode.
11217 * @param fRepPrefix Indicates whether a repeat prefix is used
11218 * (doesn't matter which for this instruction).
11219 * @param cbInstr The instruction length in bytes.
11220 * @param iEffSeg The effective segment register number.
11221 */
11222VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11223 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11224{
11225 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11226 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11227
11228 /*
11229 * State init.
11230 */
11231 PIEMCPU pIemCpu = &pVCpu->iem.s;
11232 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11233
11234 /*
11235 * Switch orgy for getting to the right handler.
11236 */
11237 VBOXSTRICTRC rcStrict;
11238 if (fRepPrefix)
11239 {
11240 switch (enmAddrMode)
11241 {
11242 case IEMMODE_16BIT:
11243 switch (cbValue)
11244 {
11245 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11246 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11247 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11248 default:
11249 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11250 }
11251 break;
11252
11253 case IEMMODE_32BIT:
11254 switch (cbValue)
11255 {
11256 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11257 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11258 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11259 default:
11260 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11261 }
11262 break;
11263
11264 case IEMMODE_64BIT:
11265 switch (cbValue)
11266 {
11267 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11268 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11269 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11270 default:
11271 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11272 }
11273 break;
11274
11275 default:
11276 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11277 }
11278 }
11279 else
11280 {
11281 switch (enmAddrMode)
11282 {
11283 case IEMMODE_16BIT:
11284 switch (cbValue)
11285 {
11286 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11287 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11288 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11289 default:
11290 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11291 }
11292 break;
11293
11294 case IEMMODE_32BIT:
11295 switch (cbValue)
11296 {
11297 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11298 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11299 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11300 default:
11301 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11302 }
11303 break;
11304
11305 case IEMMODE_64BIT:
11306 switch (cbValue)
11307 {
11308 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11309 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11310 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11311 default:
11312 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11313 }
11314 break;
11315
11316 default:
11317 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11318 }
11319 }
11320
11321 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11322}
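/* Usage sketch (illustrative only): an HM exit handler that has already decoded a
 * REP OUTSB using the DS segment in 32-bit code could forward it as
 *
 *     rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT, true, cbInstr, X86_SREG_DS);
 *
 * i.e. cbValue = 1 and fRepPrefix = true, with cbInstr taken from the exit information.
 */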
11323
11324
11325/**
11326 * Interface for HM and EM for executing string I/O IN (read) instructions.
11327 *
11328 * This API ASSUMES that the caller has already verified that the guest code is
11329 * allowed to access the I/O port. (The I/O port is in the DX register in the
11330 * guest state.)
11331 *
11332 * @returns Strict VBox status code.
11333 * @param pVCpu The cross context per virtual CPU structure.
11334 * @param cbValue The size of the I/O port access (1, 2, or 4).
11335 * @param enmAddrMode The addressing mode.
11336 * @param fRepPrefix Indicates whether a repeat prefix is used
11337 * (doesn't matter which for this instruction).
11338 * @param cbInstr The instruction length in bytes.
11339 */
11340VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11341 bool fRepPrefix, uint8_t cbInstr)
11342{
11343 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11344
11345 /*
11346 * State init.
11347 */
11348 PIEMCPU pIemCpu = &pVCpu->iem.s;
11349 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11350
11351 /*
11352 * Switch orgy for getting to the right handler.
11353 */
11354 VBOXSTRICTRC rcStrict;
11355 if (fRepPrefix)
11356 {
11357 switch (enmAddrMode)
11358 {
11359 case IEMMODE_16BIT:
11360 switch (cbValue)
11361 {
11362 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11363 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11364 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11365 default:
11366 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11367 }
11368 break;
11369
11370 case IEMMODE_32BIT:
11371 switch (cbValue)
11372 {
11373 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11374 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11375 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11376 default:
11377 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11378 }
11379 break;
11380
11381 case IEMMODE_64BIT:
11382 switch (cbValue)
11383 {
11384 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11385 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11386 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11387 default:
11388 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11389 }
11390 break;
11391
11392 default:
11393 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11394 }
11395 }
11396 else
11397 {
11398 switch (enmAddrMode)
11399 {
11400 case IEMMODE_16BIT:
11401 switch (cbValue)
11402 {
11403 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11404 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11405 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11406 default:
11407 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11408 }
11409 break;
11410
11411 case IEMMODE_32BIT:
11412 switch (cbValue)
11413 {
11414 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11415 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11416 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11417 default:
11418 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11419 }
11420 break;
11421
11422 case IEMMODE_64BIT:
11423 switch (cbValue)
11424 {
11425 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11426 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11427 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11428 default:
11429 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11430 }
11431 break;
11432
11433 default:
11434 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11435 }
11436 }
11437
11438 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11439}
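/* Usage sketch (illustrative only): the IN direction needs no segment parameter since
 * INS always writes through ES:(E)DI; a non-repeated INSW in 16-bit code would be
 *
 *     rcStrict = IEMExecStringIoRead(pVCpu, 2, IEMMODE_16BIT, false, cbInstr);
 */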
11440
11441
11442
11443/**
11444 * Interface for HM and EM to write to a CRx register.
11445 *
11446 * @returns Strict VBox status code.
11447 * @param pVCpu The cross context per virtual CPU structure.
11448 * @param cbInstr The instruction length in bytes.
11449 * @param iCrReg The control register number (destination).
11450 * @param iGReg The general purpose register number (source).
11451 *
11452 * @remarks In ring-0 not all of the state needs to be synced in.
11453 */
11454VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11455{
11456 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11457 Assert(iCrReg < 16);
11458 Assert(iGReg < 16);
11459
11460 PIEMCPU pIemCpu = &pVCpu->iem.s;
11461 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11462 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11463 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11464}
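/* Usage sketch (illustrative only): completing an intercepted "mov cr3, rax" after the
 * exit handler has decoded the register operands:
 *
 *     rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3, X86_GREG_xAX);
 */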
11465
11466
11467/**
11468 * Interface for HM and EM to read from a CRx register.
11469 *
11470 * @returns Strict VBox status code.
11471 * @param pVCpu The cross context per virtual CPU structure.
11472 * @param cbInstr The instruction length in bytes.
11473 * @param iGReg The general purpose register number (destination).
11474 * @param iCrReg The control register number (source).
11475 *
11476 * @remarks In ring-0 not all of the state needs to be synced in.
11477 */
11478VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11479{
11480 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11481 Assert(iCrReg < 16);
11482 Assert(iGReg < 16);
11483
11484 PIEMCPU pIemCpu = &pVCpu->iem.s;
11485 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11486 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11487 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11488}
11489
11490
11491/**
11492 * Interface for HM and EM to clear the CR0[TS] bit.
11493 *
11494 * @returns Strict VBox status code.
11495 * @param pVCpu The cross context per virtual CPU structure.
11496 * @param cbInstr The instruction length in bytes.
11497 *
11498 * @remarks In ring-0 not all of the state needs to be synced in.
11499 */
11500VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11501{
11502 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11503
11504 PIEMCPU pIemCpu = &pVCpu->iem.s;
11505 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11506 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11507 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11508}
11509
11510
11511/**
11512 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11513 *
11514 * @returns Strict VBox status code.
11515 * @param pVCpu The cross context per virtual CPU structure.
11516 * @param cbInstr The instruction length in bytes.
11517 * @param uValue The value to load into CR0.
11518 *
11519 * @remarks In ring-0 not all of the state needs to be synced in.
11520 */
11521VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11522{
11523 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11524
11525 PIEMCPU pIemCpu = &pVCpu->iem.s;
11526 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11527 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11528 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11529}
11530
11531
11532/**
11533 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11534 *
11535 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11536 *
11537 * @returns Strict VBox status code.
11538 * @param pVCpu The cross context per virtual CPU structure of the
11539 * calling EMT.
11540 * @param cbInstr The instruction length in bytes.
11541 * @remarks In ring-0 not all of the state needs to be synced in.
11542 * @threads EMT(pVCpu)
11543 */
11544VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11545{
11546 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11547
11548 PIEMCPU pIemCpu = &pVCpu->iem.s;
11549 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11550 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11551 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11552}
11553