VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 59285

Last change on this file since 59285 was 59285, checked in by vboxsync, 9 years ago

iprt/x86.h,*: Drop IntRedirBitmap from X86TSS32 and X86TSS64.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 442.9 KB
1/* $Id: IEMAll.cpp 59285 2016-01-08 00:02:39Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 *
72 */
73
74/** @def IEM_VERIFICATION_MODE_MINIMAL
75 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
76 * context. */
77#if defined(DOXYGEN_RUNNING)
78# define IEM_VERIFICATION_MODE_MINIMAL
79#endif
80//#define IEM_LOG_MEMORY_WRITES
81#define IEM_IMPLEMENTS_TASKSWITCH
82
83
84/*********************************************************************************************************************************
85* Header Files *
86*********************************************************************************************************************************/
87#define LOG_GROUP LOG_GROUP_IEM
88#include <VBox/vmm/iem.h>
89#include <VBox/vmm/cpum.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <internal/pgm.h>
93#include <VBox/vmm/iom.h>
94#include <VBox/vmm/em.h>
95#include <VBox/vmm/hm.h>
96#include <VBox/vmm/tm.h>
97#include <VBox/vmm/dbgf.h>
98#include <VBox/vmm/dbgftrace.h>
99#ifdef VBOX_WITH_RAW_MODE_NOT_R0
100# include <VBox/vmm/patm.h>
101# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
102# include <VBox/vmm/csam.h>
103# endif
104#endif
105#include "IEMInternal.h"
106#ifdef IEM_VERIFICATION_MODE_FULL
107# include <VBox/vmm/rem.h>
108# include <VBox/vmm/mm.h>
109#endif
110#include <VBox/vmm/vm.h>
111#include <VBox/log.h>
112#include <VBox/err.h>
113#include <VBox/param.h>
114#include <VBox/dis.h>
115#include <VBox/disopcode.h>
116#include <iprt/assert.h>
117#include <iprt/string.h>
118#include <iprt/x86.h>
119
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/** @typedef PFNIEMOP
126 * Pointer to an opcode decoder function.
127 */
128
129/** @def FNIEMOP_DEF
130 * Define an opcode decoder function.
131 *
132 * We're using macros for this so that adding and removing parameters as well as
133 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
134 *
135 * @param a_Name The function name.
136 */
137
138
139#if defined(__GNUC__) && defined(RT_ARCH_X86)
140typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
141# define FNIEMOP_DEF(a_Name) \
142 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
143# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
145# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
147
148#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
149typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
150# define FNIEMOP_DEF(a_Name) \
151 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
152# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
156
157#elif defined(__GNUC__)
158typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
159# define FNIEMOP_DEF(a_Name) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
161# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
163# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
165
166#else
167typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#endif
176
177
178/**
179 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
180 */
181typedef union IEMSELDESC
182{
183 /** The legacy view. */
184 X86DESC Legacy;
185 /** The long mode view. */
186 X86DESC64 Long;
187} IEMSELDESC;
188/** Pointer to a selector descriptor table entry. */
189typedef IEMSELDESC *PIEMSELDESC;
190
191
192/*********************************************************************************************************************************
193* Defined Constants And Macros *
194*********************************************************************************************************************************/
195/** Temporary hack to disable the double execution. Will be removed in favor
196 * of a dedicated execution mode in EM. */
197//#define IEM_VERIFICATION_MODE_NO_REM
198
199/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
200 * due to GCC lacking knowledge about the value range of a switch. */
201#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
202
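/* Example sketch (illustration only; the helper name is hypothetical): the macro
   supplies the unreachable default case of a switch over an exhaustive value,
   keeping GCC quiet about 'may be used uninitialized' paths. */
IEM_STATIC VBOXSTRICTRC iemExampleGetOpSize(IEMMODE enmEffOpSize, uint8_t *pcbOp)
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT: *pcbOp = 2; return VINF_SUCCESS;
        case IEMMODE_32BIT: *pcbOp = 4; return VINF_SUCCESS;
        case IEMMODE_64BIT: *pcbOp = 8; return VINF_SUCCESS;
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* asserts and returns VERR_IPE_NOT_REACHED_DEFAULT_CASE */
    }
}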
203/**
204 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
205 * occasion.
206 */
207#ifdef LOG_ENABLED
208# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
209 do { \
210 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
212 } while (0)
213#else
214# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
215 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
216#endif
217
218/**
219 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
220 * occasion using the supplied logger statement.
221 *
222 * @param a_LoggerArgs What to log on failure.
223 */
224#ifdef LOG_ENABLED
225# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
226 do { \
227 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
228 /*LogFunc(a_LoggerArgs);*/ \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
230 } while (0)
231#else
232# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
233 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
234#endif
235
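/* Example sketch (illustration only; the handler name is hypothetical): a decoder
   that hits an aspect it does not implement bails out through these macros, with
   the _LOG variant recording why. */
FNIEMOP_DEF(iemOp_ExampleUnimplemented)
{
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("64-bit form not implemented\n"));
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
}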
236/**
237 * Call an opcode decoder function.
238 *
239 * We're using macros for this so that adding and removing parameters can be
240 * done as we please. See FNIEMOP_DEF.
241 */
242#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
243
244/**
245 * Call a common opcode decoder function taking one extra argument.
246 *
247 * We're using macros for this so that adding and removing parameters can be
248 * done as we please. See FNIEMOP_DEF_1.
249 */
250#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
251
252/**
253 * Call a common opcode decoder function taking two extra arguments.
254 *
255 * We're using macros for this so that adding and removing parameters can be
256 * done as we please. See FNIEMOP_DEF_2.
257 */
258#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
259
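/* Example sketch (illustration only; both function names are hypothetical): an
   opcode handler defined with FNIEMOP_DEF forwards to a shared worker defined
   with FNIEMOP_DEF_1 through FNIEMOP_CALL_1, so the calling convention details
   stay hidden behind the macros. */
FNIEMOP_DEF_1(iemOpCommonExampleWorker, uint8_t, bRm)
{
    Log4(("example worker: bRm=%#x\n", bRm));
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOp_ExampleForwarder)
{
    return FNIEMOP_CALL_1(iemOpCommonExampleWorker, 0xc0 /* fixed ModRM value for illustration */);
}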
260/**
261 * Check if we're currently executing in real or virtual 8086 mode.
262 *
263 * @returns @c true if it is, @c false if not.
264 * @param a_pIemCpu The IEM state of the current CPU.
265 */
266#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
267
268/**
269 * Check if we're currently executing in virtual 8086 mode.
270 *
271 * @returns @c true if it is, @c false if not.
272 * @param a_pIemCpu The IEM state of the current CPU.
273 */
274#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
275
276/**
277 * Check if we're currently executing in long mode.
278 *
279 * @returns @c true if it is, @c false if not.
280 * @param a_pIemCpu The IEM state of the current CPU.
281 */
282#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
283
284/**
285 * Check if we're currently executing in real mode.
286 *
287 * @returns @c true if it is, @c false if not.
288 * @param a_pIemCpu The IEM state of the current CPU.
289 */
290#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
291
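/* Example sketch (illustration only; the handler name is hypothetical): the mode
   query macros typically guard decoder paths that differ between real/V86 mode,
   protected mode and long mode. */
FNIEMOP_DEF(iemOp_ExampleModeDependent)
{
    if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
        Log4(("example: real or V86 mode, CPL=%u\n", pIemCpu->uCpl));
    else if (IEM_IS_LONG_MODE(pIemCpu))
        Log4(("example: long mode\n"));
    else
        Log4(("example: protected mode\n"));
    return VINF_SUCCESS;
}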
292/**
293 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
294 * @returns PCCPUMFEATURES
295 * @param a_pIemCpu The IEM state of the current CPU.
296 */
297#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
298
299/**
300 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
301 * @returns PCCPUMFEATURES
302 * @param a_pIemCpu The IEM state of the current CPU.
303 */
304#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
305
306/**
307 * Evaluates to true if we're presenting an Intel CPU to the guest.
308 */
309#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
310
311/**
312 * Evaluates to true if we're presenting an AMD CPU to the guest.
313 */
314#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
315
316/**
317 * Check if the address is canonical.
318 */
319#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
320
321
322/*********************************************************************************************************************************
323* Global Variables *
324*********************************************************************************************************************************/
325extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
326
327
328/** Function table for the ADD instruction. */
329IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
330{
331 iemAImpl_add_u8, iemAImpl_add_u8_locked,
332 iemAImpl_add_u16, iemAImpl_add_u16_locked,
333 iemAImpl_add_u32, iemAImpl_add_u32_locked,
334 iemAImpl_add_u64, iemAImpl_add_u64_locked
335};
336
337/** Function table for the ADC instruction. */
338IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
339{
340 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
341 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
342 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
343 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
344};
345
346/** Function table for the SUB instruction. */
347IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
348{
349 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
350 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
351 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
352 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
353};
354
355/** Function table for the SBB instruction. */
356IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
357{
358 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
359 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
360 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
361 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
362};
363
364/** Function table for the OR instruction. */
365IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
366{
367 iemAImpl_or_u8, iemAImpl_or_u8_locked,
368 iemAImpl_or_u16, iemAImpl_or_u16_locked,
369 iemAImpl_or_u32, iemAImpl_or_u32_locked,
370 iemAImpl_or_u64, iemAImpl_or_u64_locked
371};
372
373/** Function table for the XOR instruction. */
374IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
375{
376 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
377 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
378 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
379 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
380};
381
382/** Function table for the AND instruction. */
383IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
384{
385 iemAImpl_and_u8, iemAImpl_and_u8_locked,
386 iemAImpl_and_u16, iemAImpl_and_u16_locked,
387 iemAImpl_and_u32, iemAImpl_and_u32_locked,
388 iemAImpl_and_u64, iemAImpl_and_u64_locked
389};
390
391/** Function table for the CMP instruction.
392 * @remarks Making operand order ASSUMPTIONS.
393 */
394IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
395{
396 iemAImpl_cmp_u8, NULL,
397 iemAImpl_cmp_u16, NULL,
398 iemAImpl_cmp_u32, NULL,
399 iemAImpl_cmp_u64, NULL
400};
401
402/** Function table for the TEST instruction.
403 * @remarks Making operand order ASSUMPTIONS.
404 */
405IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
406{
407 iemAImpl_test_u8, NULL,
408 iemAImpl_test_u16, NULL,
409 iemAImpl_test_u32, NULL,
410 iemAImpl_test_u64, NULL
411};
412
413/** Function table for the BT instruction. */
414IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
415{
416 NULL, NULL,
417 iemAImpl_bt_u16, NULL,
418 iemAImpl_bt_u32, NULL,
419 iemAImpl_bt_u64, NULL
420};
421
422/** Function table for the BTC instruction. */
423IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
424{
425 NULL, NULL,
426 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
427 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
428 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
429};
430
431/** Function table for the BTR instruction. */
432IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
433{
434 NULL, NULL,
435 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
436 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
437 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
438};
439
440/** Function table for the BTS instruction. */
441IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
442{
443 NULL, NULL,
444 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
445 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
446 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
447};
448
449/** Function table for the BSF instruction. */
450IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
451{
452 NULL, NULL,
453 iemAImpl_bsf_u16, NULL,
454 iemAImpl_bsf_u32, NULL,
455 iemAImpl_bsf_u64, NULL
456};
457
458/** Function table for the BSR instruction. */
459IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
460{
461 NULL, NULL,
462 iemAImpl_bsr_u16, NULL,
463 iemAImpl_bsr_u32, NULL,
464 iemAImpl_bsr_u64, NULL
465};
466
467/** Function table for the IMUL instruction. */
468IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
469{
470 NULL, NULL,
471 iemAImpl_imul_two_u16, NULL,
472 iemAImpl_imul_two_u32, NULL,
473 iemAImpl_imul_two_u64, NULL
474};
475
476/** Group 1 /r lookup table. */
477IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
478{
479 &g_iemAImpl_add,
480 &g_iemAImpl_or,
481 &g_iemAImpl_adc,
482 &g_iemAImpl_sbb,
483 &g_iemAImpl_and,
484 &g_iemAImpl_sub,
485 &g_iemAImpl_xor,
486 &g_iemAImpl_cmp
487};
488
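/* Example sketch (illustration only; the helper name is hypothetical): the group 1
   table is indexed by the ModRM.reg field (bits 5:3), so /0 selects ADD, /1 OR,
   /2 ADC, /3 SBB, /4 AND, /5 SUB, /6 XOR and /7 CMP for the 0x80-0x83 opcodes. */
IEM_STATIC PCIEMOPBINSIZES iemExampleSelectGrp1Impl(uint8_t bRm)
{
    return g_apIemImplGrp1[(bRm >> 3) & 7];
}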
489/** Function table for the INC instruction. */
490IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
491{
492 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
493 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
494 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
495 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
496};
497
498/** Function table for the DEC instruction. */
499IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
500{
501 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
502 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
503 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
504 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
505};
506
507/** Function table for the NEG instruction. */
508IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
509{
510 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
511 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
512 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
513 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
514};
515
516/** Function table for the NOT instruction. */
517IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
518{
519 iemAImpl_not_u8, iemAImpl_not_u8_locked,
520 iemAImpl_not_u16, iemAImpl_not_u16_locked,
521 iemAImpl_not_u32, iemAImpl_not_u32_locked,
522 iemAImpl_not_u64, iemAImpl_not_u64_locked
523};
524
525
526/** Function table for the ROL instruction. */
527IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
528{
529 iemAImpl_rol_u8,
530 iemAImpl_rol_u16,
531 iemAImpl_rol_u32,
532 iemAImpl_rol_u64
533};
534
535/** Function table for the ROR instruction. */
536IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
537{
538 iemAImpl_ror_u8,
539 iemAImpl_ror_u16,
540 iemAImpl_ror_u32,
541 iemAImpl_ror_u64
542};
543
544/** Function table for the RCL instruction. */
545IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
546{
547 iemAImpl_rcl_u8,
548 iemAImpl_rcl_u16,
549 iemAImpl_rcl_u32,
550 iemAImpl_rcl_u64
551};
552
553/** Function table for the RCR instruction. */
554IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
555{
556 iemAImpl_rcr_u8,
557 iemAImpl_rcr_u16,
558 iemAImpl_rcr_u32,
559 iemAImpl_rcr_u64
560};
561
562/** Function table for the SHL instruction. */
563IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
564{
565 iemAImpl_shl_u8,
566 iemAImpl_shl_u16,
567 iemAImpl_shl_u32,
568 iemAImpl_shl_u64
569};
570
571/** Function table for the SHR instruction. */
572IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
573{
574 iemAImpl_shr_u8,
575 iemAImpl_shr_u16,
576 iemAImpl_shr_u32,
577 iemAImpl_shr_u64
578};
579
580/** Function table for the SAR instruction. */
581IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
582{
583 iemAImpl_sar_u8,
584 iemAImpl_sar_u16,
585 iemAImpl_sar_u32,
586 iemAImpl_sar_u64
587};
588
589
590/** Function table for the MUL instruction. */
591IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
592{
593 iemAImpl_mul_u8,
594 iemAImpl_mul_u16,
595 iemAImpl_mul_u32,
596 iemAImpl_mul_u64
597};
598
599/** Function table for the IMUL instruction working implicitly on rAX. */
600IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
601{
602 iemAImpl_imul_u8,
603 iemAImpl_imul_u16,
604 iemAImpl_imul_u32,
605 iemAImpl_imul_u64
606};
607
608/** Function table for the DIV instruction. */
609IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
610{
611 iemAImpl_div_u8,
612 iemAImpl_div_u16,
613 iemAImpl_div_u32,
614 iemAImpl_div_u64
615};
616
617/** Function table for the IDIV instruction. */
618IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
619{
620 iemAImpl_idiv_u8,
621 iemAImpl_idiv_u16,
622 iemAImpl_idiv_u32,
623 iemAImpl_idiv_u64
624};
625
626/** Function table for the SHLD instruction */
627IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
628{
629 iemAImpl_shld_u16,
630 iemAImpl_shld_u32,
631 iemAImpl_shld_u64,
632};
633
634/** Function table for the SHRD instruction */
635IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
636{
637 iemAImpl_shrd_u16,
638 iemAImpl_shrd_u32,
639 iemAImpl_shrd_u64,
640};
641
642
643/** Function table for the PUNPCKLBW instruction */
644IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
645/** Function table for the PUNPCKLWD instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
647/** Function table for the PUNPCKLDQ instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
649/** Function table for the PUNPCKLQDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
651
652/** Function table for the PUNPCKHBW instruction */
653IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
654/** Function table for the PUNPCKHWD instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
656/** Function table for the PUNPCKHDQ instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
658/** Function table for the PUNPCKHQDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
660
661/** Function table for the PXOR instruction */
662IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
663/** Function table for the PCMPEQB instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
665/** Function table for the PCMPEQW instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
667/** Function table for the PCMPEQD instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
669
670
671#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
672/** What IEM just wrote. */
673uint8_t g_abIemWrote[256];
674/** How much IEM just wrote. */
675size_t g_cbIemWrote;
676#endif
677
678
679/*********************************************************************************************************************************
680* Internal Functions *
681*********************************************************************************************************************************/
682IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
683IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
687IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
689IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
692IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
694IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
695IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
698IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
699IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
700IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
701IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
709IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
710IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
713IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
714IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
715
716#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
717IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
718#endif
719IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
720IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
721
722
723
724/**
725 * Sets the pass up status.
726 *
727 * @returns VINF_SUCCESS.
728 * @param pIemCpu The per CPU IEM state of the calling thread.
729 * @param rcPassUp The pass up status. Must be informational.
730 * VINF_SUCCESS is not allowed.
731 */
732IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
733{
734 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
735
736 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
737 if (rcOldPassUp == VINF_SUCCESS)
738 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
739 /* If both are EM scheduling codes, use EM priority rules. */
740 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
741 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
742 {
743 if (rcPassUp < rcOldPassUp)
744 {
745 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
747 }
748 else
749 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
750 }
751 /* Override EM scheduling with specific status code. */
752 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
753 {
754 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
755 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
756 }
757 /* Don't override specific status code, first come first served. */
758 else
759 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
760 return VINF_SUCCESS;
761}
762
763
764/**
765 * Initializes the execution state.
766 *
767 * @param pIemCpu The per CPU IEM state.
768 * @param fBypassHandlers Whether to bypass access handlers.
769 */
770DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
771{
772 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
773 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
774
775 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
776 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
777
778#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
780 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
781 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
782 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
783 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
784 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
785 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
786 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
787#endif
788
789#ifdef VBOX_WITH_RAW_MODE_NOT_R0
790 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
791#endif
792 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
793 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
794 ? IEMMODE_64BIT
795 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
796 ? IEMMODE_32BIT
797 : IEMMODE_16BIT;
798 pIemCpu->enmCpuMode = enmMode;
799#ifdef VBOX_STRICT
800 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
801 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
802 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
803 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
804 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
805 pIemCpu->uRexReg = 127;
806 pIemCpu->uRexB = 127;
807 pIemCpu->uRexIndex = 127;
808 pIemCpu->iEffSeg = 127;
809 pIemCpu->offOpcode = 127;
810 pIemCpu->cbOpcode = 127;
811#endif
812
813 pIemCpu->cActiveMappings = 0;
814 pIemCpu->iNextMapping = 0;
815 pIemCpu->rcPassUp = VINF_SUCCESS;
816 pIemCpu->fBypassHandlers = fBypassHandlers;
817#ifdef VBOX_WITH_RAW_MODE_NOT_R0
818 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
819 && pCtx->cs.u64Base == 0
820 && pCtx->cs.u32Limit == UINT32_MAX
821 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
822 if (!pIemCpu->fInPatchCode)
823 CPUMRawLeave(pVCpu, VINF_SUCCESS);
824#endif
825}
826
827
828/**
829 * Initializes the decoder state.
830 *
831 * @param pIemCpu The per CPU IEM state.
832 * @param fBypassHandlers Whether to bypass access handlers.
833 */
834DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
835{
836 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
837 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
838
839 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
840 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
841
842#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
843 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
851#endif
852
853#ifdef VBOX_WITH_RAW_MODE_NOT_R0
854 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
855#endif
856 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
857#ifdef IEM_VERIFICATION_MODE_FULL
858 if (pIemCpu->uInjectCpl != UINT8_MAX)
859 pIemCpu->uCpl = pIemCpu->uInjectCpl;
860#endif
861 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
862 ? IEMMODE_64BIT
863 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
864 ? IEMMODE_32BIT
865 : IEMMODE_16BIT;
866 pIemCpu->enmCpuMode = enmMode;
867 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
868 pIemCpu->enmEffAddrMode = enmMode;
869 if (enmMode != IEMMODE_64BIT)
870 {
871 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
872 pIemCpu->enmEffOpSize = enmMode;
873 }
874 else
875 {
876 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
877 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
878 }
879 pIemCpu->fPrefixes = 0;
880 pIemCpu->uRexReg = 0;
881 pIemCpu->uRexB = 0;
882 pIemCpu->uRexIndex = 0;
883 pIemCpu->iEffSeg = X86_SREG_DS;
884 pIemCpu->offOpcode = 0;
885 pIemCpu->cbOpcode = 0;
886 pIemCpu->cActiveMappings = 0;
887 pIemCpu->iNextMapping = 0;
888 pIemCpu->rcPassUp = VINF_SUCCESS;
889 pIemCpu->fBypassHandlers = fBypassHandlers;
890#ifdef VBOX_WITH_RAW_MODE_NOT_R0
891 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
892 && pCtx->cs.u64Base == 0
893 && pCtx->cs.u32Limit == UINT32_MAX
894 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
895 if (!pIemCpu->fInPatchCode)
896 CPUMRawLeave(pVCpu, VINF_SUCCESS);
897#endif
898
899#ifdef DBGFTRACE_ENABLED
900 switch (enmMode)
901 {
902 case IEMMODE_64BIT:
903 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
904 break;
905 case IEMMODE_32BIT:
906 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
907 break;
908 case IEMMODE_16BIT:
909 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
910 break;
911 }
912#endif
913}
914
915
916/**
917 * Prefetches opcodes when starting execution for the first time.
918 *
919 * @returns Strict VBox status code.
920 * @param pIemCpu The IEM state.
921 * @param fBypassHandlers Whether to bypass access handlers.
922 */
923IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
924{
925#ifdef IEM_VERIFICATION_MODE_FULL
926 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
927#endif
928 iemInitDecoder(pIemCpu, fBypassHandlers);
929
930 /*
931 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
932 *
933 * First translate CS:rIP to a physical address.
934 */
935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
936 uint32_t cbToTryRead;
937 RTGCPTR GCPtrPC;
938 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
939 {
940 cbToTryRead = PAGE_SIZE;
941 GCPtrPC = pCtx->rip;
942 if (!IEM_IS_CANONICAL(GCPtrPC))
943 return iemRaiseGeneralProtectionFault0(pIemCpu);
944 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
945 }
946 else
947 {
948 uint32_t GCPtrPC32 = pCtx->eip;
949 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
950 if (GCPtrPC32 > pCtx->cs.u32Limit)
951 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
952 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
953 if (!cbToTryRead) /* overflowed */
954 {
955 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
956 cbToTryRead = UINT32_MAX;
957 }
958 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
959 Assert(GCPtrPC <= UINT32_MAX);
960 }
961
962#ifdef VBOX_WITH_RAW_MODE_NOT_R0
963 /* Allow interpretation of patch manager code blocks since they can for
964 instance throw #PFs for perfectly good reasons. */
965 if (pIemCpu->fInPatchCode)
966 {
967 size_t cbRead = 0;
968 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
969 AssertRCReturn(rc, rc);
970 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
971 return VINF_SUCCESS;
972 }
973#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
974
975 RTGCPHYS GCPhys;
976 uint64_t fFlags;
977 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
978 if (RT_FAILURE(rc))
979 {
980 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
981 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
982 }
983 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
984 {
985 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
986 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
987 }
988 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
989 {
990 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
991 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
992 }
993 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
994 /** @todo Check reserved bits and such stuff. PGM is better at doing
995 * that, so do it when implementing the guest virtual address
996 * TLB... */
997
998#ifdef IEM_VERIFICATION_MODE_FULL
999 /*
1000 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1001 * instruction.
1002 */
1003 /** @todo optimize this differently by not using PGMPhysRead. */
1004 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1005 pIemCpu->GCPhysOpcodes = GCPhys;
1006 if ( offPrevOpcodes < cbOldOpcodes
1007 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1008 {
1009 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1010 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1011 pIemCpu->cbOpcode = cbNew;
1012 return VINF_SUCCESS;
1013 }
1014#endif
1015
1016 /*
1017 * Read the bytes at this address.
1018 */
1019 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1020#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1021 size_t cbActual;
1022 if ( PATMIsEnabled(pVM)
1023 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1024 {
1025 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1026 Assert(cbActual > 0);
1027 pIemCpu->cbOpcode = (uint8_t)cbActual;
1028 }
1029 else
1030#endif
1031 {
1032 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1033 if (cbToTryRead > cbLeftOnPage)
1034 cbToTryRead = cbLeftOnPage;
1035 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1036 cbToTryRead = sizeof(pIemCpu->abOpcode);
1037
1038 if (!pIemCpu->fBypassHandlers)
1039 {
1040 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1041 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1042 { /* likely */ }
1043 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1044 {
1045 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1046 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1047 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1048 }
1049 else
1050 {
1051 Log((RT_SUCCESS(rcStrict)
1052 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1053 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1054 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1055 return rcStrict;
1056 }
1057 }
1058 else
1059 {
1060 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1061 if (RT_SUCCESS(rc))
1062 { /* likely */ }
1063 else
1064 {
1065 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1066 GCPtrPC, GCPhys, cbToTryRead, rc));
1067 return rc;
1068 }
1069 }
1070 pIemCpu->cbOpcode = cbToTryRead;
1071 }
1072
1073 return VINF_SUCCESS;
1074}
1075
1076
1077/**
1078 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1079 * exception if it fails.
1080 *
1081 * @returns Strict VBox status code.
1082 * @param pIemCpu The IEM state.
1083 * @param cbMin The minimum number of bytes relative to offOpcode
1084 * that must be read.
1085 */
1086IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1087{
1088 /*
1089 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1090 *
1091 * First translate CS:rIP to a physical address.
1092 */
1093 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1094 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1095 uint32_t cbToTryRead;
1096 RTGCPTR GCPtrNext;
1097 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1098 {
1099 cbToTryRead = PAGE_SIZE;
1100 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1101 if (!IEM_IS_CANONICAL(GCPtrNext))
1102 return iemRaiseGeneralProtectionFault0(pIemCpu);
1103 }
1104 else
1105 {
1106 uint32_t GCPtrNext32 = pCtx->eip;
1107 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1108 GCPtrNext32 += pIemCpu->cbOpcode;
1109 if (GCPtrNext32 > pCtx->cs.u32Limit)
1110 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1111 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1112 if (!cbToTryRead) /* overflowed */
1113 {
1114 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1115 cbToTryRead = UINT32_MAX;
1116 /** @todo check out wrapping around the code segment. */
1117 }
1118 if (cbToTryRead < cbMin - cbLeft)
1119 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1120 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1121 }
1122
1123 /* Only read up to the end of the page, and make sure we don't read more
1124 than the opcode buffer can hold. */
1125 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1126 if (cbToTryRead > cbLeftOnPage)
1127 cbToTryRead = cbLeftOnPage;
1128 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1129 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1130/** @todo r=bird: Convert assertion into undefined opcode exception? */
1131 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1132
1133#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1134 /* Allow interpretation of patch manager code blocks since they can for
1135 instance throw #PFs for perfectly good reasons. */
1136 if (pIemCpu->fInPatchCode)
1137 {
1138 size_t cbRead = 0;
1139 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1140 AssertRCReturn(rc, rc);
1141 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1142 return VINF_SUCCESS;
1143 }
1144#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1145
1146 RTGCPHYS GCPhys;
1147 uint64_t fFlags;
1148 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1149 if (RT_FAILURE(rc))
1150 {
1151 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1152 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1153 }
1154 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1155 {
1156 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1157 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1158 }
1159 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1160 {
1161 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1162 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1163 }
1164 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1165 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1166 /** @todo Check reserved bits and such stuff. PGM is better at doing
1167 * that, so do it when implementing the guest virtual address
1168 * TLB... */
1169
1170 /*
1171 * Read the bytes at this address.
1172 *
1173 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1174 * and since PATM should only patch the start of an instruction there
1175 * should be no need to check again here.
1176 */
1177 if (!pIemCpu->fBypassHandlers)
1178 {
1179 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1180 cbToTryRead, PGMACCESSORIGIN_IEM);
1181 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1182 { /* likely */ }
1183 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1184 {
1185 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1186 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1187 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1188 }
1189 else
1190 {
1191 Log((RT_SUCCESS(rcStrict)
1192 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1193 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1194 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1195 return rcStrict;
1196 }
1197 }
1198 else
1199 {
1200 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1201 if (RT_SUCCESS(rc))
1202 { /* likely */ }
1203 else
1204 {
1205 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1206 return rc;
1207 }
1208 }
1209 pIemCpu->cbOpcode += cbToTryRead;
1210 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1211
1212 return VINF_SUCCESS;
1213}
1214
1215
1216/**
1217 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1218 *
1219 * @returns Strict VBox status code.
1220 * @param pIemCpu The IEM state.
1221 * @param pb Where to return the opcode byte.
1222 */
1223DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1224{
1225 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1226 if (rcStrict == VINF_SUCCESS)
1227 {
1228 uint8_t offOpcode = pIemCpu->offOpcode;
1229 *pb = pIemCpu->abOpcode[offOpcode];
1230 pIemCpu->offOpcode = offOpcode + 1;
1231 }
1232 else
1233 *pb = 0;
1234 return rcStrict;
1235}
1236
1237
1238/**
1239 * Fetches the next opcode byte.
1240 *
1241 * @returns Strict VBox status code.
1242 * @param pIemCpu The IEM state.
1243 * @param pu8 Where to return the opcode byte.
1244 */
1245DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1246{
1247 uint8_t const offOpcode = pIemCpu->offOpcode;
1248 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1249 {
1250 *pu8 = pIemCpu->abOpcode[offOpcode];
1251 pIemCpu->offOpcode = offOpcode + 1;
1252 return VINF_SUCCESS;
1253 }
1254 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1255}
1256
1257
1258/**
1259 * Fetches the next opcode byte, returns automatically on failure.
1260 *
1261 * @param a_pu8 Where to return the opcode byte.
1262 * @remark Implicitly references pIemCpu.
1263 */
1264#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1265 do \
1266 { \
1267 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1268 if (rcStrict2 != VINF_SUCCESS) \
1269 return rcStrict2; \
1270 } while (0)
1271
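/* Example sketch (illustration only; the handler name is hypothetical): inside a
   decoder function the macro fetches the next opcode byte and, on failure (e.g. a
   #PF raised while prefetching), returns the strict status straight out of the
   enclosing function. */
FNIEMOP_DEF(iemOp_ExampleFetchModRm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);           /* returns on failure */
    Log4(("example: ModRM byte %#x\n", bRm));
    return VINF_SUCCESS;
}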
1272
1273/**
1274 * Fetches the next signed byte from the opcode stream.
1275 *
1276 * @returns Strict VBox status code.
1277 * @param pIemCpu The IEM state.
1278 * @param pi8 Where to return the signed byte.
1279 */
1280DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1281{
1282 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1283}
1284
1285
1286/**
1287 * Fetches the next signed byte from the opcode stream, returning automatically
1288 * on failure.
1289 *
1290 * @param a_pi8 Where to return the signed byte.
1291 * @remark Implicitly references pIemCpu.
1292 */
1293#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1294 do \
1295 { \
1296 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1297 if (rcStrict2 != VINF_SUCCESS) \
1298 return rcStrict2; \
1299 } while (0)
1300
1301
1302/**
1303 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1304 *
1305 * @returns Strict VBox status code.
1306 * @param pIemCpu The IEM state.
1307 * @param pu16 Where to return the opcode word.
1308 */
1309DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1310{
1311 uint8_t u8;
1312 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1313 if (rcStrict == VINF_SUCCESS)
1314 *pu16 = (int8_t)u8;
1315 return rcStrict;
1316}
1317
1318
1319/**
1320 * Fetches the next signed byte from the opcode stream, extending it to
1321 * unsigned 16-bit.
1322 *
1323 * @returns Strict VBox status code.
1324 * @param pIemCpu The IEM state.
1325 * @param pu16 Where to return the unsigned word.
1326 */
1327DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1328{
1329 uint8_t const offOpcode = pIemCpu->offOpcode;
1330 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1331 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1332
1333 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1334 pIemCpu->offOpcode = offOpcode + 1;
1335 return VINF_SUCCESS;
1336}
1337
1338
1339/**
1340 * Fetches the next signed byte from the opcode stream, sign-extending it to
1341 * a word and returning automatically on failure.
1342 *
1343 * @param a_pu16 Where to return the word.
1344 * @remark Implicitly references pIemCpu.
1345 */
1346#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1347 do \
1348 { \
1349 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1350 if (rcStrict2 != VINF_SUCCESS) \
1351 return rcStrict2; \
1352 } while (0)
1353
1354
1355/**
1356 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1357 *
1358 * @returns Strict VBox status code.
1359 * @param pIemCpu The IEM state.
1360 * @param pu32 Where to return the opcode dword.
1361 */
1362DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1363{
1364 uint8_t u8;
1365 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1366 if (rcStrict == VINF_SUCCESS)
1367 *pu32 = (int8_t)u8;
1368 return rcStrict;
1369}
1370
1371
1372/**
1373 * Fetches the next signed byte from the opcode stream, extending it to
1374 * unsigned 32-bit.
1375 *
1376 * @returns Strict VBox status code.
1377 * @param pIemCpu The IEM state.
1378 * @param pu32 Where to return the unsigned dword.
1379 */
1380DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1381{
1382 uint8_t const offOpcode = pIemCpu->offOpcode;
1383 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1384 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1385
1386 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1387 pIemCpu->offOpcode = offOpcode + 1;
1388 return VINF_SUCCESS;
1389}
1390
1391
1392/**
1393 * Fetches the next signed byte from the opcode stream, sign-extending it to
1394 * a double word and returning automatically on failure.
1395 *
1396 * @param a_pu32 Where to return the double word.
1397 * @remark Implicitly references pIemCpu.
1398 */
1399#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1400 do \
1401 { \
1402 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1403 if (rcStrict2 != VINF_SUCCESS) \
1404 return rcStrict2; \
1405 } while (0)
1406
1407
1408/**
1409 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1410 *
1411 * @returns Strict VBox status code.
1412 * @param pIemCpu The IEM state.
1413 * @param pu64 Where to return the opcode qword.
1414 */
1415DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1416{
1417 uint8_t u8;
1418 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1419 if (rcStrict == VINF_SUCCESS)
1420 *pu64 = (int8_t)u8;
1421 return rcStrict;
1422}
1423
1424
1425/**
1426 * Fetches the next signed byte from the opcode stream, extending it to
1427 * unsigned 64-bit.
1428 *
1429 * @returns Strict VBox status code.
1430 * @param pIemCpu The IEM state.
1431 * @param pu64 Where to return the unsigned qword.
1432 */
1433DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1434{
1435 uint8_t const offOpcode = pIemCpu->offOpcode;
1436 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1437 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1438
1439 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1440 pIemCpu->offOpcode = offOpcode + 1;
1441 return VINF_SUCCESS;
1442}
1443
1444
1445/**
1446 * Fetches the next signed byte from the opcode stream, sign-extending it to
1447 * a quad word and returning automatically on failure.
1448 *
1449 * @param a_pu64 Where to return the quad word.
1450 * @remark Implicitly references pIemCpu.
1451 */
1452#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1453 do \
1454 { \
1455 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1456 if (rcStrict2 != VINF_SUCCESS) \
1457 return rcStrict2; \
1458 } while (0)
1459
1460
1461/**
1462 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1463 *
1464 * @returns Strict VBox status code.
1465 * @param pIemCpu The IEM state.
1466 * @param pu16 Where to return the opcode word.
1467 */
1468DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1469{
1470 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1471 if (rcStrict == VINF_SUCCESS)
1472 {
1473 uint8_t offOpcode = pIemCpu->offOpcode;
1474 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1475 pIemCpu->offOpcode = offOpcode + 2;
1476 }
1477 else
1478 *pu16 = 0;
1479 return rcStrict;
1480}
1481
1482
1483/**
1484 * Fetches the next opcode word.
1485 *
1486 * @returns Strict VBox status code.
1487 * @param pIemCpu The IEM state.
1488 * @param pu16 Where to return the opcode word.
1489 */
1490DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1491{
1492 uint8_t const offOpcode = pIemCpu->offOpcode;
1493 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1494 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1495
1496 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1497 pIemCpu->offOpcode = offOpcode + 2;
1498 return VINF_SUCCESS;
1499}
1500
1501
1502/**
1503 * Fetches the next opcode word, returns automatically on failure.
1504 *
1505 * @param a_pu16 Where to return the opcode word.
1506 * @remark Implicitly references pIemCpu.
1507 */
1508#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1509 do \
1510 { \
1511 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1512 if (rcStrict2 != VINF_SUCCESS) \
1513 return rcStrict2; \
1514 } while (0)
1515
1516
1517/**
1518 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1519 *
1520 * @returns Strict VBox status code.
1521 * @param pIemCpu The IEM state.
1522 * @param pu32 Where to return the opcode double word.
1523 */
1524DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1525{
1526 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1527 if (rcStrict == VINF_SUCCESS)
1528 {
1529 uint8_t offOpcode = pIemCpu->offOpcode;
1530 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1531 pIemCpu->offOpcode = offOpcode + 2;
1532 }
1533 else
1534 *pu32 = 0;
1535 return rcStrict;
1536}
1537
1538
1539/**
1540 * Fetches the next opcode word, zero extending it to a double word.
1541 *
1542 * @returns Strict VBox status code.
1543 * @param pIemCpu The IEM state.
1544 * @param pu32 Where to return the opcode double word.
1545 */
1546DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1547{
1548 uint8_t const offOpcode = pIemCpu->offOpcode;
1549 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1550 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1551
1552 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1553 pIemCpu->offOpcode = offOpcode + 2;
1554 return VINF_SUCCESS;
1555}
1556
1557
1558/**
1559 * Fetches the next opcode word and zero extends it to a double word, returns
1560 * automatically on failure.
1561 *
1562 * @param a_pu32 Where to return the opcode double word.
1563 * @remark Implicitly references pIemCpu.
1564 */
1565#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1566 do \
1567 { \
1568 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1569 if (rcStrict2 != VINF_SUCCESS) \
1570 return rcStrict2; \
1571 } while (0)
1572
1573
1574/**
1575 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1576 *
1577 * @returns Strict VBox status code.
1578 * @param pIemCpu The IEM state.
1579 * @param pu64 Where to return the opcode quad word.
1580 */
1581DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1582{
1583 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1584 if (rcStrict == VINF_SUCCESS)
1585 {
1586 uint8_t offOpcode = pIemCpu->offOpcode;
1587 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1588 pIemCpu->offOpcode = offOpcode + 2;
1589 }
1590 else
1591 *pu64 = 0;
1592 return rcStrict;
1593}
1594
1595
1596/**
1597 * Fetches the next opcode word, zero extending it to a quad word.
1598 *
1599 * @returns Strict VBox status code.
1600 * @param pIemCpu The IEM state.
1601 * @param pu64 Where to return the opcode quad word.
1602 */
1603DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1604{
1605 uint8_t const offOpcode = pIemCpu->offOpcode;
1606 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1607 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1608
1609 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1610 pIemCpu->offOpcode = offOpcode + 2;
1611 return VINF_SUCCESS;
1612}
1613
1614
1615/**
1616 * Fetches the next opcode word and zero extends it to a quad word, returns
1617 * automatically on failure.
1618 *
1619 * @param a_pu64 Where to return the opcode quad word.
1620 * @remark Implicitly references pIemCpu.
1621 */
1622#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1623 do \
1624 { \
1625 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1626 if (rcStrict2 != VINF_SUCCESS) \
1627 return rcStrict2; \
1628 } while (0)
1629
1630
1631/**
1632 * Fetches the next signed word from the opcode stream.
1633 *
1634 * @returns Strict VBox status code.
1635 * @param pIemCpu The IEM state.
1636 * @param pi16 Where to return the signed word.
1637 */
1638DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1639{
1640 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1641}
1642
1643
1644/**
1645 * Fetches the next signed word from the opcode stream, returning automatically
1646 * on failure.
1647 *
1648 * @param a_pi16 Where to return the signed word.
1649 * @remark Implicitly references pIemCpu.
1650 */
1651#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1652 do \
1653 { \
1654 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1655 if (rcStrict2 != VINF_SUCCESS) \
1656 return rcStrict2; \
1657 } while (0)
1658
1659
1660/**
1661 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1662 *
1663 * @returns Strict VBox status code.
1664 * @param pIemCpu The IEM state.
1665 * @param pu32 Where to return the opcode dword.
1666 */
1667DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1668{
1669 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1670 if (rcStrict == VINF_SUCCESS)
1671 {
1672 uint8_t offOpcode = pIemCpu->offOpcode;
1673 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1674 pIemCpu->abOpcode[offOpcode + 1],
1675 pIemCpu->abOpcode[offOpcode + 2],
1676 pIemCpu->abOpcode[offOpcode + 3]);
1677 pIemCpu->offOpcode = offOpcode + 4;
1678 }
1679 else
1680 *pu32 = 0;
1681 return rcStrict;
1682}
1683
1684
1685/**
1686 * Fetches the next opcode dword.
1687 *
1688 * @returns Strict VBox status code.
1689 * @param pIemCpu The IEM state.
1690 * @param pu32 Where to return the opcode double word.
1691 */
1692DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1693{
1694 uint8_t const offOpcode = pIemCpu->offOpcode;
1695 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1696 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1697
1698 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1699 pIemCpu->abOpcode[offOpcode + 1],
1700 pIemCpu->abOpcode[offOpcode + 2],
1701 pIemCpu->abOpcode[offOpcode + 3]);
1702 pIemCpu->offOpcode = offOpcode + 4;
1703 return VINF_SUCCESS;
1704}
1705
1706
1707/**
1708 * Fetches the next opcode dword, returns automatically on failure.
1709 *
1710 * @param a_pu32 Where to return the opcode dword.
1711 * @remark Implicitly references pIemCpu.
1712 */
1713#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1714 do \
1715 { \
1716 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1717 if (rcStrict2 != VINF_SUCCESS) \
1718 return rcStrict2; \
1719 } while (0)
1720
1721
1722/**
1723 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1724 *
1725 * @returns Strict VBox status code.
1726 * @param pIemCpu The IEM state.
1727 * @param pu64 Where to return the opcode dword.
1728 */
1729DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1730{
1731 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1732 if (rcStrict == VINF_SUCCESS)
1733 {
1734 uint8_t offOpcode = pIemCpu->offOpcode;
1735 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1736 pIemCpu->abOpcode[offOpcode + 1],
1737 pIemCpu->abOpcode[offOpcode + 2],
1738 pIemCpu->abOpcode[offOpcode + 3]);
1739 pIemCpu->offOpcode = offOpcode + 4;
1740 }
1741 else
1742 *pu64 = 0;
1743 return rcStrict;
1744}
1745
1746
1747/**
1748 * Fetches the next opcode dword, zero extending it to a quad word.
1749 *
1750 * @returns Strict VBox status code.
1751 * @param pIemCpu The IEM state.
1752 * @param pu64 Where to return the opcode quad word.
1753 */
1754DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1755{
1756 uint8_t const offOpcode = pIemCpu->offOpcode;
1757 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1758 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1759
1760 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1761 pIemCpu->abOpcode[offOpcode + 1],
1762 pIemCpu->abOpcode[offOpcode + 2],
1763 pIemCpu->abOpcode[offOpcode + 3]);
1764 pIemCpu->offOpcode = offOpcode + 4;
1765 return VINF_SUCCESS;
1766}
1767
1768
1769/**
1770 * Fetches the next opcode dword and zero extends it to a quad word, returns
1771 * automatically on failure.
1772 *
1773 * @param a_pu64 Where to return the opcode quad word.
1774 * @remark Implicitly references pIemCpu.
1775 */
1776#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1777 do \
1778 { \
1779 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1780 if (rcStrict2 != VINF_SUCCESS) \
1781 return rcStrict2; \
1782 } while (0)
1783
1784
1785/**
1786 * Fetches the next signed double word from the opcode stream.
1787 *
1788 * @returns Strict VBox status code.
1789 * @param pIemCpu The IEM state.
1790 * @param pi32 Where to return the signed double word.
1791 */
1792DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1793{
1794 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1795}
1796
1797/**
1798 * Fetches the next signed double word from the opcode stream, returning
1799 * automatically on failure.
1800 *
1801 * @param a_pi32 Where to return the signed double word.
1802 * @remark Implicitly references pIemCpu.
1803 */
1804#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1805 do \
1806 { \
1807 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1808 if (rcStrict2 != VINF_SUCCESS) \
1809 return rcStrict2; \
1810 } while (0)
1811
1812
1813/**
1814 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1815 *
1816 * @returns Strict VBox status code.
1817 * @param pIemCpu The IEM state.
1818 * @param pu64 Where to return the opcode qword.
1819 */
1820DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1821{
1822 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1823 if (rcStrict == VINF_SUCCESS)
1824 {
1825 uint8_t offOpcode = pIemCpu->offOpcode;
1826 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1827 pIemCpu->abOpcode[offOpcode + 1],
1828 pIemCpu->abOpcode[offOpcode + 2],
1829 pIemCpu->abOpcode[offOpcode + 3]);
1830 pIemCpu->offOpcode = offOpcode + 4;
1831 }
1832 else
1833 *pu64 = 0;
1834 return rcStrict;
1835}
1836
1837
1838/**
1839 * Fetches the next opcode dword, sign extending it into a quad word.
1840 *
1841 * @returns Strict VBox status code.
1842 * @param pIemCpu The IEM state.
1843 * @param pu64 Where to return the opcode quad word.
1844 */
1845DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1846{
1847 uint8_t const offOpcode = pIemCpu->offOpcode;
1848 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1849 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1850
1851 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1852 pIemCpu->abOpcode[offOpcode + 1],
1853 pIemCpu->abOpcode[offOpcode + 2],
1854 pIemCpu->abOpcode[offOpcode + 3]);
1855 *pu64 = i32;
1856 pIemCpu->offOpcode = offOpcode + 4;
1857 return VINF_SUCCESS;
1858}
1859
1860
1861/**
1862 * Fetches the next opcode double word and sign extends it to a quad word,
1863 * returns automatically on failure.
1864 *
1865 * @param a_pu64 Where to return the opcode quad word.
1866 * @remark Implicitly references pIemCpu.
1867 */
1868#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1869 do \
1870 { \
1871 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1872 if (rcStrict2 != VINF_SUCCESS) \
1873 return rcStrict2; \
1874 } while (0)
1875
1876
1877/**
1878 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1879 *
1880 * @returns Strict VBox status code.
1881 * @param pIemCpu The IEM state.
1882 * @param pu64 Where to return the opcode qword.
1883 */
1884DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1885{
1886 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1887 if (rcStrict == VINF_SUCCESS)
1888 {
1889 uint8_t offOpcode = pIemCpu->offOpcode;
1890 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1891 pIemCpu->abOpcode[offOpcode + 1],
1892 pIemCpu->abOpcode[offOpcode + 2],
1893 pIemCpu->abOpcode[offOpcode + 3],
1894 pIemCpu->abOpcode[offOpcode + 4],
1895 pIemCpu->abOpcode[offOpcode + 5],
1896 pIemCpu->abOpcode[offOpcode + 6],
1897 pIemCpu->abOpcode[offOpcode + 7]);
1898 pIemCpu->offOpcode = offOpcode + 8;
1899 }
1900 else
1901 *pu64 = 0;
1902 return rcStrict;
1903}
1904
1905
1906/**
1907 * Fetches the next opcode qword.
1908 *
1909 * @returns Strict VBox status code.
1910 * @param pIemCpu The IEM state.
1911 * @param pu64 Where to return the opcode qword.
1912 */
1913DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1914{
1915 uint8_t const offOpcode = pIemCpu->offOpcode;
1916 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1917 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1918
1919 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1920 pIemCpu->abOpcode[offOpcode + 1],
1921 pIemCpu->abOpcode[offOpcode + 2],
1922 pIemCpu->abOpcode[offOpcode + 3],
1923 pIemCpu->abOpcode[offOpcode + 4],
1924 pIemCpu->abOpcode[offOpcode + 5],
1925 pIemCpu->abOpcode[offOpcode + 6],
1926 pIemCpu->abOpcode[offOpcode + 7]);
1927 pIemCpu->offOpcode = offOpcode + 8;
1928 return VINF_SUCCESS;
1929}
1930
1931
1932/**
1933 * Fetches the next opcode quad word, returns automatically on failure.
1934 *
1935 * @param a_pu64 Where to return the opcode quad word.
1936 * @remark Implicitly references pIemCpu.
1937 */
1938#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1939 do \
1940 { \
1941 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1942 if (rcStrict2 != VINF_SUCCESS) \
1943 return rcStrict2; \
1944 } while (0)
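
/*
 * Illustrative sketch only (hypothetical decoder fragment, not part of the
 * original file): the zero/sign extending variants above let an instruction
 * fetch its immediate into a single uint64_t regardless of the effective
 * operand size. The field name pIemCpu->enmEffOpSize is assumed here purely
 * for the sake of the example:
 *
 *     uint64_t u64Imm;
 *     switch (pIemCpu->enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: IEM_OPCODE_GET_NEXT_U16_ZX_U64(&u64Imm); break;
 *         case IEMMODE_32BIT: IEM_OPCODE_GET_NEXT_U32_ZX_U64(&u64Imm); break;
 *         default:            IEM_OPCODE_GET_NEXT_U64(&u64Imm);        break;
 *     }
 */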
1945
1946
1947/** @name Misc Worker Functions.
1948 * @{
1949 */
1950
1951
1952/**
1953 * Validates a new SS segment.
1954 *
1955 * @returns VBox strict status code.
1956 * @param pIemCpu The IEM per CPU instance data.
1957 * @param pCtx The CPU context.
1958 * @param   NewSS           The new SS selector.
1959 * @param uCpl The CPL to load the stack for.
1960 * @param pDesc Where to return the descriptor.
1961 */
1962IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1963{
1964 NOREF(pCtx);
1965
1966 /* Null selectors are not allowed (we're not called for dispatching
1967 interrupts with SS=0 in long mode). */
1968 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1969 {
1970 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1971 return iemRaiseTaskSwitchFault0(pIemCpu);
1972 }
1973
1974 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1975 if ((NewSS & X86_SEL_RPL) != uCpl)
1976 {
1977 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1978 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1979 }
1980
1981 /*
1982 * Read the descriptor.
1983 */
1984 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1985 if (rcStrict != VINF_SUCCESS)
1986 return rcStrict;
1987
1988 /*
1989 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1990 */
1991 if (!pDesc->Legacy.Gen.u1DescType)
1992 {
1993 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1994 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1995 }
1996
1997 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1998 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1999 {
2000 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2001 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2002 }
2003 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2004 {
2005 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2006 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2007 }
2008
2009 /* Is it there? */
2010 /** @todo testcase: Is this checked before the canonical / limit check below? */
2011 if (!pDesc->Legacy.Gen.u1Present)
2012 {
2013 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2014 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2015 }
2016
2017 return VINF_SUCCESS;
2018}
2019
2020
2021/**
2022 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2023 * not.
2024 *
2025 * @param a_pIemCpu The IEM per CPU data.
2026 * @param a_pCtx The CPU context.
2027 */
2028#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2029# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2030 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2031 ? (a_pCtx)->eflags.u \
2032 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2033#else
2034# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2035 ( (a_pCtx)->eflags.u )
2036#endif
2037
2038/**
2039 * Updates the EFLAGS in the correct manner wrt. PATM.
2040 *
2041 * @param a_pIemCpu The IEM per CPU data.
2042 * @param a_pCtx The CPU context.
2043 * @param a_fEfl The new EFLAGS.
2044 */
2045#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2046# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2047 do { \
2048 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2049 (a_pCtx)->eflags.u = (a_fEfl); \
2050 else \
2051 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2052 } while (0)
2053#else
2054# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2055 do { \
2056 (a_pCtx)->eflags.u = (a_fEfl); \
2057 } while (0)
2058#endif
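
/*
 * Illustrative note: guest EFLAGS modifications should go through the two
 * wrappers above so that the flags PATM shadows in raw mode stay consistent.
 * The usual read-modify-write pattern, as used by the exception code further
 * down, looks like this:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */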
2059
2060
2061/** @} */
2062
2063/** @name Raising Exceptions.
2064 *
2065 * @{
2066 */
2067
2068/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2069 * @{ */
2070/** CPU exception. */
2071#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2072/** External interrupt (from PIC, APIC, whatever). */
2073#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2074/** Software interrupt (int or into, not bound).
2075 * Returns to the following instruction. */
2076#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2077/** Takes an error code. */
2078#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2079/** Takes a CR2. */
2080#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2081/** Generated by the breakpoint instruction. */
2082#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2083/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2084#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2085/** @} */
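
/*
 * Illustrative note: the flags above are combined to describe the event being
 * dispatched. A page fault, for instance, is a CPU exception that carries both
 * an error code and a CR2 value:
 *
 *     uint32_t fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2;
 */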
2086
2087
2088/**
2089 * Loads the specified stack far pointer from the TSS.
2090 *
2091 * @returns VBox strict status code.
2092 * @param pIemCpu The IEM per CPU instance data.
2093 * @param pCtx The CPU context.
2094 * @param uCpl The CPL to load the stack for.
2095 * @param pSelSS Where to return the new stack segment.
2096 * @param puEsp Where to return the new stack pointer.
2097 */
2098IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2099 PRTSEL pSelSS, uint32_t *puEsp)
2100{
2101 VBOXSTRICTRC rcStrict;
2102 Assert(uCpl < 4);
2103 *puEsp = 0; /* make gcc happy */
2104 *pSelSS = 0; /* make gcc happy */
2105
2106 switch (pCtx->tr.Attr.n.u4Type)
2107 {
2108 /*
2109 * 16-bit TSS (X86TSS16).
2110 */
2111 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2112 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2113 {
2114 uint32_t off = uCpl * 4 + 2;
2115 if (off + 4 > pCtx->tr.u32Limit)
2116 {
2117 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2118 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2119 }
2120
2121 uint32_t u32Tmp = 0; /* gcc maybe... */
2122 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2123 if (rcStrict == VINF_SUCCESS)
2124 {
2125 *puEsp = RT_LOWORD(u32Tmp);
2126 *pSelSS = RT_HIWORD(u32Tmp);
2127 return VINF_SUCCESS;
2128 }
2129 break;
2130 }
2131
2132 /*
2133 * 32-bit TSS (X86TSS32).
2134 */
2135 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2136 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2137 {
2138 uint32_t off = uCpl * 8 + 4;
2139 if (off + 7 > pCtx->tr.u32Limit)
2140 {
2141            Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2142 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2143 }
2144
2145 uint64_t u64Tmp;
2146 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2147 if (rcStrict == VINF_SUCCESS)
2148 {
2149 *puEsp = u64Tmp & UINT32_MAX;
2150 *pSelSS = (RTSEL)(u64Tmp >> 32);
2151 return VINF_SUCCESS;
2152 }
2153 break;
2154 }
2155
2156 default:
2157 AssertFailedReturn(VERR_IEM_IPE_4);
2158 }
2159 return rcStrict;
2160}
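
/*
 * Worked example (informational): in a 32-bit TSS the ring N stack fields live
 * at offset 4 + N * 8, so for uCpl=1 the code above reads 8 bytes at offset 12,
 * yielding ESP1 in the low dword and SS1 in the low word of the high dword. In
 * a 16-bit TSS the ring N stack fields live at offset 2 + N * 4 (SP then SS),
 * read as a single dword.
 */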
2161
2162
2163/**
2164 * Loads the specified stack pointer from the 64-bit TSS.
2165 *
2166 * @returns VBox strict status code.
2167 * @param pIemCpu The IEM per CPU instance data.
2168 * @param pCtx The CPU context.
2169 * @param uCpl The CPL to load the stack for.
2170 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2171 * @param puRsp Where to return the new stack pointer.
2172 */
2173IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2174{
2175 Assert(uCpl < 4);
2176 Assert(uIst < 8);
2177 *puRsp = 0; /* make gcc happy */
2178
2179 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2180
2181 uint32_t off;
2182 if (uIst)
2183 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2184 else
2185 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2186 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2187 {
2188 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2189 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2190 }
2191
2192 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2193}
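
/*
 * Worked example (informational): with the standard 64-bit TSS layout, RSP0
 * starts at offset 4 and IST1 at offset 36. Thus uCpl=2 with uIst=0 reads the
 * 8 bytes at offset 20 (RSP2), while uIst=3 reads the 8 bytes at offset 52
 * (IST3) regardless of uCpl.
 */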
2194
2195
2196/**
2197 * Adjust the CPU state according to the exception being raised.
2198 *
2199 * @param pCtx The CPU context.
2200 * @param u8Vector The exception that has been raised.
2201 */
2202DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2203{
2204 switch (u8Vector)
2205 {
2206 case X86_XCPT_DB:
2207 pCtx->dr[7] &= ~X86_DR7_GD;
2208 break;
2209 /** @todo Read the AMD and Intel exception reference... */
2210 }
2211}
2212
2213
2214/**
2215 * Implements exceptions and interrupts for real mode.
2216 *
2217 * @returns VBox strict status code.
2218 * @param pIemCpu The IEM per CPU instance data.
2219 * @param pCtx The CPU context.
2220 * @param cbInstr The number of bytes to offset rIP by in the return
2221 * address.
2222 * @param u8Vector The interrupt / exception vector number.
2223 * @param fFlags The flags.
2224 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2225 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2226 */
2227IEM_STATIC VBOXSTRICTRC
2228iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2229 PCPUMCTX pCtx,
2230 uint8_t cbInstr,
2231 uint8_t u8Vector,
2232 uint32_t fFlags,
2233 uint16_t uErr,
2234 uint64_t uCr2)
2235{
2236 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2237 NOREF(uErr); NOREF(uCr2);
2238
2239 /*
2240 * Read the IDT entry.
2241 */
2242 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2243 {
2244 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2245 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2246 }
2247 RTFAR16 Idte;
2248 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2249 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2250 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2251 return rcStrict;
2252
2253 /*
2254 * Push the stack frame.
2255 */
2256 uint16_t *pu16Frame;
2257 uint64_t uNewRsp;
2258 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2259 if (rcStrict != VINF_SUCCESS)
2260 return rcStrict;
2261
2262 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2263 pu16Frame[2] = (uint16_t)fEfl;
2264 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2265 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2266 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2267 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2268 return rcStrict;
2269
2270 /*
2271 * Load the vector address into cs:ip and make exception specific state
2272 * adjustments.
2273 */
2274 pCtx->cs.Sel = Idte.sel;
2275 pCtx->cs.ValidSel = Idte.sel;
2276 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2277 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2278 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2279 pCtx->rip = Idte.off;
2280 fEfl &= ~X86_EFL_IF;
2281 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2282
2283 /** @todo do we actually do this in real mode? */
2284 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2285 iemRaiseXcptAdjustState(pCtx, u8Vector);
2286
2287 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2288}
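
/*
 * Worked example (informational): in real mode the IDT is the classic IVT of
 * 4-byte offset:segment pairs, so for INT 10h the code above reads the far
 * pointer at IDTR.base + 0x40, pushes FLAGS, CS and IP (in that order), clears
 * IF, and resumes execution at the fetched CS:IP.
 */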
2289
2290
2291/**
2292 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2293 *
2294 * @param pIemCpu The IEM per CPU instance data.
2295 * @param pSReg Pointer to the segment register.
2296 */
2297IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2298{
2299 pSReg->Sel = 0;
2300 pSReg->ValidSel = 0;
2301 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2302 {
2303 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2304 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2305 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2306 }
2307 else
2308 {
2309 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2310 /** @todo check this on AMD-V */
2311 pSReg->u64Base = 0;
2312 pSReg->u32Limit = 0;
2313 }
2314}
2315
2316
2317/**
2318 * Loads a segment selector during a task switch in V8086 mode.
2319 *
2320 * @param pIemCpu The IEM per CPU instance data.
2321 * @param pSReg Pointer to the segment register.
2322 * @param uSel The selector value to load.
2323 */
2324IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2325{
2326 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2327 pSReg->Sel = uSel;
2328 pSReg->ValidSel = uSel;
2329 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2330 pSReg->u64Base = uSel << 4;
2331 pSReg->u32Limit = 0xffff;
2332 pSReg->Attr.u = 0xf3;
2333}
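
/*
 * Worked example (informational): loading uSel=0x1234 with the helper above
 * yields base 0x12340, limit 0xffff and attributes 0xf3 (present, DPL=3,
 * accessed read/write data segment), matching the real-address style
 * segmentation expected for V8086 mode (see the Intel spec reference above).
 */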
2334
2335
2336/**
2337 * Loads a NULL data selector into a selector register, both the hidden and
2338 * visible parts, in protected mode.
2339 *
2340 * @param pIemCpu The IEM state of the calling EMT.
2341 * @param pSReg Pointer to the segment register.
2342 * @param uRpl The RPL.
2343 */
2344IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2345{
2346    /** @todo Testcase: write a testcase checking what happens when loading a NULL
2347 * data selector in protected mode. */
2348 pSReg->Sel = uRpl;
2349 pSReg->ValidSel = uRpl;
2350 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2351 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2352 {
2353 /* VT-x (Intel 3960x) observed doing something like this. */
2354 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2355 pSReg->u32Limit = UINT32_MAX;
2356 pSReg->u64Base = 0;
2357 }
2358 else
2359 {
2360 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2361 pSReg->u32Limit = 0;
2362 pSReg->u64Base = 0;
2363 }
2364}
2365
2366
2367/**
2368 * Loads a segment selector during a task switch in protected mode.
2369 *
2370 * In this task switch scenario, we would throw \#TS exceptions rather than
2371 * \#GPs.
2372 *
2373 * @returns VBox strict status code.
2374 * @param pIemCpu The IEM per CPU instance data.
2375 * @param pSReg Pointer to the segment register.
2376 * @param uSel The new selector value.
2377 *
2378 * @remarks This does _not_ handle CS or SS.
2379 * @remarks This expects pIemCpu->uCpl to be up to date.
2380 */
2381IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2382{
2383 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2384
2385 /* Null data selector. */
2386 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2387 {
2388 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2390 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2391 return VINF_SUCCESS;
2392 }
2393
2394 /* Fetch the descriptor. */
2395 IEMSELDESC Desc;
2396 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2397 if (rcStrict != VINF_SUCCESS)
2398 {
2399 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2400 VBOXSTRICTRC_VAL(rcStrict)));
2401 return rcStrict;
2402 }
2403
2404 /* Must be a data segment or readable code segment. */
2405 if ( !Desc.Legacy.Gen.u1DescType
2406 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2407 {
2408 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2409 Desc.Legacy.Gen.u4Type));
2410 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2411 }
2412
2413 /* Check privileges for data segments and non-conforming code segments. */
2414 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2415 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2416 {
2417 /* The RPL and the new CPL must be less than or equal to the DPL. */
2418 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2419 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2420 {
2421 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2422 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2423 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2424 }
2425 }
2426
2427 /* Is it there? */
2428 if (!Desc.Legacy.Gen.u1Present)
2429 {
2430 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2431 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2432 }
2433
2434 /* The base and limit. */
2435 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2436 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2437
2438 /*
2439 * Ok, everything checked out fine. Now set the accessed bit before
2440 * committing the result into the registers.
2441 */
2442 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2443 {
2444 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2445 if (rcStrict != VINF_SUCCESS)
2446 return rcStrict;
2447 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2448 }
2449
2450 /* Commit */
2451 pSReg->Sel = uSel;
2452 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2453 pSReg->u32Limit = cbLimit;
2454 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2455 pSReg->ValidSel = uSel;
2456 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2457 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2458 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2459
2460 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2461 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2462 return VINF_SUCCESS;
2463}
2464
2465
2466/**
2467 * Performs a task switch.
2468 *
2469 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2470 * caller is responsible for performing the necessary checks (like DPL, TSS
2471 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2472 * reference for JMP, CALL, IRET.
2473 *
2474 * If the task switch is due to a software interrupt or hardware exception,
2475 * the caller is responsible for validating the TSS selector and descriptor. See
2476 * Intel Instruction reference for INT n.
2477 *
2478 * @returns VBox strict status code.
2479 * @param pIemCpu The IEM per CPU instance data.
2480 * @param pCtx The CPU context.
2481 * @param enmTaskSwitch What caused this task switch.
2482 * @param uNextEip The EIP effective after the task switch.
2483 * @param fFlags The flags.
2484 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2485 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2486 * @param SelTSS The TSS selector of the new task.
2487 * @param pNewDescTSS Pointer to the new TSS descriptor.
2488 */
2489IEM_STATIC VBOXSTRICTRC
2490iemTaskSwitch(PIEMCPU pIemCpu,
2491 PCPUMCTX pCtx,
2492 IEMTASKSWITCH enmTaskSwitch,
2493 uint32_t uNextEip,
2494 uint32_t fFlags,
2495 uint16_t uErr,
2496 uint64_t uCr2,
2497 RTSEL SelTSS,
2498 PIEMSELDESC pNewDescTSS)
2499{
2500 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2501 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2502
2503 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2504 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2505 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2506 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2507 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2508
2509 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2510 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2511
2512 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2513 fIsNewTSS386, pCtx->eip, uNextEip));
2514
2515 /* Update CR2 in case it's a page-fault. */
2516 /** @todo This should probably be done much earlier in IEM/PGM. See
2517 * @bugref{5653#c49}. */
2518 if (fFlags & IEM_XCPT_FLAGS_CR2)
2519 pCtx->cr2 = uCr2;
2520
2521 /*
2522 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2523 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2524 */
2525 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2526 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
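    /* Informational: X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN is 0x67 (a 32-bit TSS is
       104 bytes) and X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN is 0x2b (a 16-bit TSS is
       44 bytes); the limit value is inclusive, i.e. size - 1. */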
2527 if (uNewTSSLimit < uNewTSSLimitMin)
2528 {
2529 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2530 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2531 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2532 }
2533
2534 /*
2535 * Check the current TSS limit. The last written byte to the current TSS during the
2536 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2537 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2538 *
2539 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2540 * end up with smaller than "legal" TSS limits.
2541 */
2542 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2543 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2544 if (uCurTSSLimit < uCurTSSLimitMin)
2545 {
2546 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2547 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2548 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2549 }
2550
2551 /*
2552 * Verify that the new TSS can be accessed and map it. Map only the required contents
2553 * and not the entire TSS.
2554 */
2555 void *pvNewTSS;
2556 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2557 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2558 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2559 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2560 * not perform correct translation if this happens. See Intel spec. 7.2.1
2561 * "Task-State Segment" */
2562 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2563 if (rcStrict != VINF_SUCCESS)
2564 {
2565 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2566 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2567 return rcStrict;
2568 }
2569
2570 /*
2571 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2572 */
2573 uint32_t u32EFlags = pCtx->eflags.u32;
2574 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2575 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2576 {
2577 PX86DESC pDescCurTSS;
2578 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2579 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2580 if (rcStrict != VINF_SUCCESS)
2581 {
2582 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2583 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2584 return rcStrict;
2585 }
2586
2587 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2588 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2589 if (rcStrict != VINF_SUCCESS)
2590 {
2591 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2592 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2593 return rcStrict;
2594 }
2595
2596 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2597 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2598 {
2599 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2600 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2601 u32EFlags &= ~X86_EFL_NT;
2602 }
2603 }
2604
2605 /*
2606 * Save the CPU state into the current TSS.
2607 */
2608 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2609 if (GCPtrNewTSS == GCPtrCurTSS)
2610 {
2611 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2612 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2613 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2614 }
2615 if (fIsNewTSS386)
2616 {
2617 /*
2618 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2619 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2620 */
2621 void *pvCurTSS32;
2622 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2623 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2624 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2625 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2626 if (rcStrict != VINF_SUCCESS)
2627 {
2628 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2629 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2630 return rcStrict;
2631 }
2632
2633        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2634 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2635 pCurTSS32->eip = uNextEip;
2636 pCurTSS32->eflags = u32EFlags;
2637 pCurTSS32->eax = pCtx->eax;
2638 pCurTSS32->ecx = pCtx->ecx;
2639 pCurTSS32->edx = pCtx->edx;
2640 pCurTSS32->ebx = pCtx->ebx;
2641 pCurTSS32->esp = pCtx->esp;
2642 pCurTSS32->ebp = pCtx->ebp;
2643 pCurTSS32->esi = pCtx->esi;
2644 pCurTSS32->edi = pCtx->edi;
2645 pCurTSS32->es = pCtx->es.Sel;
2646 pCurTSS32->cs = pCtx->cs.Sel;
2647 pCurTSS32->ss = pCtx->ss.Sel;
2648 pCurTSS32->ds = pCtx->ds.Sel;
2649 pCurTSS32->fs = pCtx->fs.Sel;
2650 pCurTSS32->gs = pCtx->gs.Sel;
2651
2652 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2653 if (rcStrict != VINF_SUCCESS)
2654 {
2655 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2656 VBOXSTRICTRC_VAL(rcStrict)));
2657 return rcStrict;
2658 }
2659 }
2660 else
2661 {
2662 /*
2663 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2664 */
2665 void *pvCurTSS16;
2666 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2667 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2668 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2669 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2670 if (rcStrict != VINF_SUCCESS)
2671 {
2672 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2673 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2674 return rcStrict;
2675 }
2676
2677        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2678 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2679 pCurTSS16->ip = uNextEip;
2680 pCurTSS16->flags = u32EFlags;
2681 pCurTSS16->ax = pCtx->ax;
2682 pCurTSS16->cx = pCtx->cx;
2683 pCurTSS16->dx = pCtx->dx;
2684 pCurTSS16->bx = pCtx->bx;
2685 pCurTSS16->sp = pCtx->sp;
2686 pCurTSS16->bp = pCtx->bp;
2687 pCurTSS16->si = pCtx->si;
2688 pCurTSS16->di = pCtx->di;
2689 pCurTSS16->es = pCtx->es.Sel;
2690 pCurTSS16->cs = pCtx->cs.Sel;
2691 pCurTSS16->ss = pCtx->ss.Sel;
2692 pCurTSS16->ds = pCtx->ds.Sel;
2693
2694 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2695 if (rcStrict != VINF_SUCCESS)
2696 {
2697 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2698 VBOXSTRICTRC_VAL(rcStrict)));
2699 return rcStrict;
2700 }
2701 }
2702
2703 /*
2704 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2705 */
2706 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2707 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2708 {
2709 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2710 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2711 pNewTSS->selPrev = pCtx->tr.Sel;
2712 }
2713
2714 /*
2715 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2716 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2717 */
2718 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2719 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2720 bool fNewDebugTrap;
2721 if (fIsNewTSS386)
2722 {
2723 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2724 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2725 uNewEip = pNewTSS32->eip;
2726 uNewEflags = pNewTSS32->eflags;
2727 uNewEax = pNewTSS32->eax;
2728 uNewEcx = pNewTSS32->ecx;
2729 uNewEdx = pNewTSS32->edx;
2730 uNewEbx = pNewTSS32->ebx;
2731 uNewEsp = pNewTSS32->esp;
2732 uNewEbp = pNewTSS32->ebp;
2733 uNewEsi = pNewTSS32->esi;
2734 uNewEdi = pNewTSS32->edi;
2735 uNewES = pNewTSS32->es;
2736 uNewCS = pNewTSS32->cs;
2737 uNewSS = pNewTSS32->ss;
2738 uNewDS = pNewTSS32->ds;
2739 uNewFS = pNewTSS32->fs;
2740 uNewGS = pNewTSS32->gs;
2741 uNewLdt = pNewTSS32->selLdt;
2742 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2743 }
2744 else
2745 {
2746 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2747 uNewCr3 = 0;
2748 uNewEip = pNewTSS16->ip;
2749 uNewEflags = pNewTSS16->flags;
2750 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2751 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2752 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2753 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2754 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2755 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2756 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2757 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2758 uNewES = pNewTSS16->es;
2759 uNewCS = pNewTSS16->cs;
2760 uNewSS = pNewTSS16->ss;
2761 uNewDS = pNewTSS16->ds;
2762 uNewFS = 0;
2763 uNewGS = 0;
2764 uNewLdt = pNewTSS16->selLdt;
2765 fNewDebugTrap = false;
2766 }
2767
2768 if (GCPtrNewTSS == GCPtrCurTSS)
2769 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2770 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2771
2772 /*
2773 * We're done accessing the new TSS.
2774 */
2775 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2776 if (rcStrict != VINF_SUCCESS)
2777 {
2778 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2779 return rcStrict;
2780 }
2781
2782 /*
2783 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2784 */
2785 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2786 {
2787 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2788 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2789 if (rcStrict != VINF_SUCCESS)
2790 {
2791 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2792 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2793 return rcStrict;
2794 }
2795
2796 /* Check that the descriptor indicates the new TSS is available (not busy). */
2797 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2798 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2799 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2800
2801 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2802 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2803 if (rcStrict != VINF_SUCCESS)
2804 {
2805 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2806 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2807 return rcStrict;
2808 }
2809 }
2810
2811 /*
2812     * From this point on, we're technically in the new task. Exceptions raised from here on are
2813     * deferred until the task switch completes, but they are delivered before the first instruction of the new task executes.
2814 */
2815 pCtx->tr.Sel = SelTSS;
2816 pCtx->tr.ValidSel = SelTSS;
2817 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2818 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2819 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2820 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2821 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2822
2823 /* Set the busy bit in TR. */
2824 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2825 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2826 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2827 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2828 {
2829 uNewEflags |= X86_EFL_NT;
2830 }
2831
2832 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2833 pCtx->cr0 |= X86_CR0_TS;
2834 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2835
2836 pCtx->eip = uNewEip;
2837 pCtx->eax = uNewEax;
2838 pCtx->ecx = uNewEcx;
2839 pCtx->edx = uNewEdx;
2840 pCtx->ebx = uNewEbx;
2841 pCtx->esp = uNewEsp;
2842 pCtx->ebp = uNewEbp;
2843 pCtx->esi = uNewEsi;
2844 pCtx->edi = uNewEdi;
2845
2846 uNewEflags &= X86_EFL_LIVE_MASK;
2847 uNewEflags |= X86_EFL_RA1_MASK;
2848 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2849
2850 /*
2851     * Switch the selectors here and do the segment checks later. If we raise exceptions, the selectors
2852     * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
2853     * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
2854 */
2855 pCtx->es.Sel = uNewES;
2856 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2857 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2858
2859 pCtx->cs.Sel = uNewCS;
2860 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2861 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2862
2863 pCtx->ss.Sel = uNewSS;
2864 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2865 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2866
2867 pCtx->ds.Sel = uNewDS;
2868 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2869 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2870
2871 pCtx->fs.Sel = uNewFS;
2872 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2873 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2874
2875 pCtx->gs.Sel = uNewGS;
2876 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2877 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2878 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2879
2880 pCtx->ldtr.Sel = uNewLdt;
2881 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2882 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2883 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2884
2885 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2886 {
2887 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2888 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2889 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2890 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2891 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2892 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2893 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2894 }
2895
2896 /*
2897 * Switch CR3 for the new task.
2898 */
2899 if ( fIsNewTSS386
2900 && (pCtx->cr0 & X86_CR0_PG))
2901 {
2902 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2903 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2904 {
2905 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2906 AssertRCSuccessReturn(rc, rc);
2907 }
2908 else
2909 pCtx->cr3 = uNewCr3;
2910
2911 /* Inform PGM. */
2912 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2913 {
2914 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2915 AssertRCReturn(rc, rc);
2916 /* ignore informational status codes */
2917 }
2918 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2919 }
2920
2921 /*
2922 * Switch LDTR for the new task.
2923 */
2924 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2925 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2926 else
2927 {
2928 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2929
2930 IEMSELDESC DescNewLdt;
2931 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2932 if (rcStrict != VINF_SUCCESS)
2933 {
2934 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2935 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2936 return rcStrict;
2937 }
2938 if ( !DescNewLdt.Legacy.Gen.u1Present
2939 || DescNewLdt.Legacy.Gen.u1DescType
2940 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2941 {
2942 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2943 uNewLdt, DescNewLdt.Legacy.u));
2944 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2945 }
2946
2947 pCtx->ldtr.ValidSel = uNewLdt;
2948 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2949 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2950 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2951 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2952 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2953 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2954 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2955 }
2956
2957 IEMSELDESC DescSS;
2958 if (IEM_IS_V86_MODE(pIemCpu))
2959 {
2960 pIemCpu->uCpl = 3;
2961 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2962 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2963 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2964 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2965 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2966 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2967 }
2968 else
2969 {
2970 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2971
2972 /*
2973 * Load the stack segment for the new task.
2974 */
2975 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2976 {
2977 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2978 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2979 }
2980
2981 /* Fetch the descriptor. */
2982 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2983 if (rcStrict != VINF_SUCCESS)
2984 {
2985 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2986 VBOXSTRICTRC_VAL(rcStrict)));
2987 return rcStrict;
2988 }
2989
2990 /* SS must be a data segment and writable. */
2991 if ( !DescSS.Legacy.Gen.u1DescType
2992 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2993 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2994 {
2995 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2996 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2997 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2998 }
2999
3000 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3001 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3002 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3003 {
3004 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3005 uNewCpl));
3006 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3007 }
3008
3009 /* Is it there? */
3010 if (!DescSS.Legacy.Gen.u1Present)
3011 {
3012 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3013 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3014 }
3015
3016 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3017 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3018
3019 /* Set the accessed bit before committing the result into SS. */
3020 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3021 {
3022 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3023 if (rcStrict != VINF_SUCCESS)
3024 return rcStrict;
3025 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3026 }
3027
3028 /* Commit SS. */
3029 pCtx->ss.Sel = uNewSS;
3030 pCtx->ss.ValidSel = uNewSS;
3031 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3032 pCtx->ss.u32Limit = cbLimit;
3033 pCtx->ss.u64Base = u64Base;
3034 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3035 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3036
3037 /* CPL has changed, update IEM before loading rest of segments. */
3038 pIemCpu->uCpl = uNewCpl;
3039
3040 /*
3041 * Load the data segments for the new task.
3042 */
3043 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3044 if (rcStrict != VINF_SUCCESS)
3045 return rcStrict;
3046 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3047 if (rcStrict != VINF_SUCCESS)
3048 return rcStrict;
3049 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3050 if (rcStrict != VINF_SUCCESS)
3051 return rcStrict;
3052 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3053 if (rcStrict != VINF_SUCCESS)
3054 return rcStrict;
3055
3056 /*
3057 * Load the code segment for the new task.
3058 */
3059 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3060 {
3061 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3062 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3063 }
3064
3065 /* Fetch the descriptor. */
3066 IEMSELDESC DescCS;
3067 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3068 if (rcStrict != VINF_SUCCESS)
3069 {
3070 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3071 return rcStrict;
3072 }
3073
3074 /* CS must be a code segment. */
3075 if ( !DescCS.Legacy.Gen.u1DescType
3076 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3077 {
3078 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3079 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3080 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3081 }
3082
3083 /* For conforming CS, DPL must be less than or equal to the RPL. */
3084 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3085 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3086 {
3087            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3088 DescCS.Legacy.Gen.u2Dpl));
3089 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3090 }
3091
3092 /* For non-conforming CS, DPL must match RPL. */
3093 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3094 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3095 {
3096            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3097 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3098 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3099 }
3100
3101 /* Is it there? */
3102 if (!DescCS.Legacy.Gen.u1Present)
3103 {
3104 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3105 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3106 }
3107
3108 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3109 u64Base = X86DESC_BASE(&DescCS.Legacy);
3110
3111 /* Set the accessed bit before committing the result into CS. */
3112 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3113 {
3114 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3115 if (rcStrict != VINF_SUCCESS)
3116 return rcStrict;
3117 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3118 }
3119
3120 /* Commit CS. */
3121 pCtx->cs.Sel = uNewCS;
3122 pCtx->cs.ValidSel = uNewCS;
3123 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3124 pCtx->cs.u32Limit = cbLimit;
3125 pCtx->cs.u64Base = u64Base;
3126 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3128 }
3129
3130 /** @todo Debug trap. */
3131 if (fIsNewTSS386 && fNewDebugTrap)
3132 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3133
3134 /*
3135 * Construct the error code masks based on what caused this task switch.
3136 * See Intel Instruction reference for INT.
3137 */
3138 uint16_t uExt;
3139 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3140 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3141 {
3142 uExt = 1;
3143 }
3144 else
3145 uExt = 0;
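    /* In other words: uExt (the EXT bit of any error code pushed below) is only set
       when the task switch was triggered by an exception or a hardware interrupt;
       a software-initiated INT n leaves it clear. */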
3146
3147 /*
3148 * Push any error code on to the new stack.
3149 */
3150 if (fFlags & IEM_XCPT_FLAGS_ERR)
3151 {
3152 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3153 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3154 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3155
3156 /* Check that there is sufficient space on the stack. */
3157 /** @todo Factor out segment limit checking for normal/expand down segments
3158 * into a separate function. */
3159 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3160 {
3161 if ( pCtx->esp - 1 > cbLimitSS
3162 || pCtx->esp < cbStackFrame)
3163 {
3164 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3165 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3166 cbStackFrame));
3167 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3168 }
3169 }
3170 else
3171 {
3172 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3173 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3174 {
3175 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3176 cbStackFrame));
3177 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3178 }
3179 }
3180
3181
3182 if (fIsNewTSS386)
3183 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3184 else
3185 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3186 if (rcStrict != VINF_SUCCESS)
3187 {
3188 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3189 VBOXSTRICTRC_VAL(rcStrict)));
3190 return rcStrict;
3191 }
3192 }
3193
3194 /* Check the new EIP against the new CS limit. */
3195 if (pCtx->eip > pCtx->cs.u32Limit)
3196 {
3197 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3198 pCtx->eip, pCtx->cs.u32Limit));
3199 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3200 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3201 }
3202
3203 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3204 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3205}
3206
3207
3208/**
3209 * Implements exceptions and interrupts for protected mode.
3210 *
3211 * @returns VBox strict status code.
3212 * @param pIemCpu The IEM per CPU instance data.
3213 * @param pCtx The CPU context.
3214 * @param cbInstr The number of bytes to offset rIP by in the return
3215 * address.
3216 * @param u8Vector The interrupt / exception vector number.
3217 * @param fFlags The flags.
3218 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3219 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3220 */
3221IEM_STATIC VBOXSTRICTRC
3222iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3223 PCPUMCTX pCtx,
3224 uint8_t cbInstr,
3225 uint8_t u8Vector,
3226 uint32_t fFlags,
3227 uint16_t uErr,
3228 uint64_t uCr2)
3229{
3230 /*
3231 * Read the IDT entry.
3232 */
3233 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3234 {
3235 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3236 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3237 }
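    /* A worked sketch of the bounds check and error code above, assuming the standard
       x86 selector error-code layout (EXT = bit 0, IDT = bit 1, index from bit 3) and
       8-byte protected-mode gates: for u8Vector = 0x0e the last gate byte sits at
       offset 8 * 0x0e + 7 = 0x77, so an IDTR limit below 0x77 fails here and the #GP
       error code becomes (0x0e << 3) | X86_TRAP_ERR_IDT = 0x72. */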
3238 X86DESC Idte;
3239 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3240 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3241 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3242 return rcStrict;
3243 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3244 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3245 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3246
3247 /*
3248 * Check the descriptor type, DPL and such.
3249 * ASSUMES this is done in the same order as described for call-gate calls.
3250 */
3251 if (Idte.Gate.u1DescType)
3252 {
3253 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3254 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3255 }
3256 bool fTaskGate = false;
3257 uint8_t f32BitGate = true;
3258 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3259 switch (Idte.Gate.u4Type)
3260 {
3261 case X86_SEL_TYPE_SYS_UNDEFINED:
3262 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3263 case X86_SEL_TYPE_SYS_LDT:
3264 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3265 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3266 case X86_SEL_TYPE_SYS_UNDEFINED2:
3267 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3268 case X86_SEL_TYPE_SYS_UNDEFINED3:
3269 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3270 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3271 case X86_SEL_TYPE_SYS_UNDEFINED4:
3272 {
3273 /** @todo check what actually happens when the type is wrong...
3274 * esp. call gates. */
3275 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3276 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3277 }
3278
3279 case X86_SEL_TYPE_SYS_286_INT_GATE:
3280 f32BitGate = false; /* fall thru */
3281 case X86_SEL_TYPE_SYS_386_INT_GATE:
3282 fEflToClear |= X86_EFL_IF;
3283 break;
3284
3285 case X86_SEL_TYPE_SYS_TASK_GATE:
3286 fTaskGate = true;
3287#ifndef IEM_IMPLEMENTS_TASKSWITCH
3288 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3289#endif
3290 break;
3291
3292 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3293 f32BitGate = false; /* fall thru */
3294 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3295 break;
3296
3297 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3298 }
3299
3300 /* Check DPL against CPL if applicable. */
3301 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3302 {
3303 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3304 {
3305 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3306 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3307 }
3308 }
3309
3310 /* Is it there? */
3311 if (!Idte.Gate.u1Present)
3312 {
3313 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3314 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3315 }
3316
3317 /* Is it a task-gate? */
3318 if (fTaskGate)
3319 {
3320 /*
3321 * Construct the error code masks based on what caused this task switch.
3322 * See Intel Instruction reference for INT.
3323 */
3324 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3325 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3326 RTSEL SelTSS = Idte.Gate.u16Sel;
3327
3328 /*
3329 * Fetch the TSS descriptor in the GDT.
3330 */
3331 IEMSELDESC DescTSS;
3332 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3333 if (rcStrict != VINF_SUCCESS)
3334 {
3335 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3336 VBOXSTRICTRC_VAL(rcStrict)));
3337 return rcStrict;
3338 }
3339
3340 /* The TSS descriptor must be a system segment and be available (not busy). */
3341 if ( DescTSS.Legacy.Gen.u1DescType
3342 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3343 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3344 {
3345 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3346 u8Vector, SelTSS, DescTSS.Legacy.au64));
3347 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3348 }
3349
3350 /* The TSS must be present. */
3351 if (!DescTSS.Legacy.Gen.u1Present)
3352 {
3353 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3354 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3355 }
3356
3357 /* Do the actual task switch. */
3358 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3359 }
3360
3361 /* A null CS is bad. */
3362 RTSEL NewCS = Idte.Gate.u16Sel;
3363 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3364 {
3365 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3366 return iemRaiseGeneralProtectionFault0(pIemCpu);
3367 }
3368
3369 /* Fetch the descriptor for the new CS. */
3370 IEMSELDESC DescCS;
3371 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3372 if (rcStrict != VINF_SUCCESS)
3373 {
3374 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3375 return rcStrict;
3376 }
3377
3378 /* Must be a code segment. */
3379 if (!DescCS.Legacy.Gen.u1DescType)
3380 {
3381 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3382 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3383 }
3384 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3385 {
3386 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3387 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3388 }
3389
3390 /* Don't allow lowering the privilege level. */
3391 /** @todo Does the lowering of privileges apply to software interrupts
3392 * only? This has bearings on the more-privileged or
3393 * same-privilege stack behavior further down. A testcase would
3394 * be nice. */
3395 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3396 {
3397 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3398 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3399 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3400 }
3401
3402 /* Make sure the selector is present. */
3403 if (!DescCS.Legacy.Gen.u1Present)
3404 {
3405 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3406 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3407 }
3408
3409 /* Check the new EIP against the new CS limit. */
3410 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3411 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3412 ? Idte.Gate.u16OffsetLow
3413 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3414 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3415 if (uNewEip > cbLimitCS)
3416 {
3417 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3418 u8Vector, uNewEip, cbLimitCS, NewCS));
3419 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3420 }
3421
3422 /* Calc the flag image to push. */
3423 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3424 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3425 fEfl &= ~X86_EFL_RF;
3426 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3427 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3428
3429 /* From V8086 mode only go to CPL 0. */
3430 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3431 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3432 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3433 {
3434 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3435 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3436 }
3437
3438 /*
3439 * If the privilege level changes, we need to get a new stack from the TSS.
3440 * This in turns means validating the new SS and ESP...
3441 */
3442 if (uNewCpl != pIemCpu->uCpl)
3443 {
3444 RTSEL NewSS;
3445 uint32_t uNewEsp;
3446 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3447 if (rcStrict != VINF_SUCCESS)
3448 return rcStrict;
3449
3450 IEMSELDESC DescSS;
3451 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3452 if (rcStrict != VINF_SUCCESS)
3453 return rcStrict;
3454
3455 /* Check that there is sufficient space for the stack frame. */
3456 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3457 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3458 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3459 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
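            /* A quick sketch of the sizes computed above: a 32-bit gate (f32BitGate = 1)
               outside V86 mode with an error code gives 12 << 1 = 24 bytes, i.e. the six
               dwords written below (uErr, EIP, CS, EFLAGS, ESP, SS); the V86 case adds
               ES/DS/FS/GS for 20 << 1 = 40 bytes, and a 16-bit gate (f32BitGate = 0)
               halves both since words are pushed instead of dwords. */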
3460
3461 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3462 {
3463 if ( uNewEsp - 1 > cbLimitSS
3464 || uNewEsp < cbStackFrame)
3465 {
3466 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3467 u8Vector, NewSS, uNewEsp, cbStackFrame));
3468 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3469 }
3470 }
3471 else
3472 {
3473 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3474 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3475 {
3476 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3477 u8Vector, NewSS, uNewEsp, cbStackFrame));
3478 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3479 }
3480 }
3481
3482 /*
3483 * Start making changes.
3484 */
3485
3486 /* Create the stack frame. */
3487 RTPTRUNION uStackFrame;
3488 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3489 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3490 if (rcStrict != VINF_SUCCESS)
3491 return rcStrict;
3492 void * const pvStackFrame = uStackFrame.pv;
3493 if (f32BitGate)
3494 {
3495 if (fFlags & IEM_XCPT_FLAGS_ERR)
3496 *uStackFrame.pu32++ = uErr;
3497 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3498 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3499 uStackFrame.pu32[2] = fEfl;
3500 uStackFrame.pu32[3] = pCtx->esp;
3501 uStackFrame.pu32[4] = pCtx->ss.Sel;
3502 if (fEfl & X86_EFL_VM)
3503 {
3504 uStackFrame.pu32[1] = pCtx->cs.Sel;
3505 uStackFrame.pu32[5] = pCtx->es.Sel;
3506 uStackFrame.pu32[6] = pCtx->ds.Sel;
3507 uStackFrame.pu32[7] = pCtx->fs.Sel;
3508 uStackFrame.pu32[8] = pCtx->gs.Sel;
3509 }
3510 }
3511 else
3512 {
3513 if (fFlags & IEM_XCPT_FLAGS_ERR)
3514 *uStackFrame.pu16++ = uErr;
3515 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3516 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3517 uStackFrame.pu16[2] = fEfl;
3518 uStackFrame.pu16[3] = pCtx->sp;
3519 uStackFrame.pu16[4] = pCtx->ss.Sel;
3520 if (fEfl & X86_EFL_VM)
3521 {
3522 uStackFrame.pu16[1] = pCtx->cs.Sel;
3523 uStackFrame.pu16[5] = pCtx->es.Sel;
3524 uStackFrame.pu16[6] = pCtx->ds.Sel;
3525 uStackFrame.pu16[7] = pCtx->fs.Sel;
3526 uStackFrame.pu16[8] = pCtx->gs.Sel;
3527 }
3528 }
3529 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3530 if (rcStrict != VINF_SUCCESS)
3531 return rcStrict;
3532
3533 /* Mark the selectors 'accessed' (hope this is the correct time). */
3534 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3535 * after pushing the stack frame? (Write protect the gdt + stack to
3536 * find out.) */
3537 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3538 {
3539 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3540 if (rcStrict != VINF_SUCCESS)
3541 return rcStrict;
3542 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3543 }
3544
3545 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3546 {
3547 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3548 if (rcStrict != VINF_SUCCESS)
3549 return rcStrict;
3550 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3551 }
3552
3553 /*
3554 * Start committing the register changes (joins with the DPL=CPL branch).
3555 */
3556 pCtx->ss.Sel = NewSS;
3557 pCtx->ss.ValidSel = NewSS;
3558 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3559 pCtx->ss.u32Limit = cbLimitSS;
3560 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3561 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3562 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3563 pIemCpu->uCpl = uNewCpl;
3564
3565 if (fEfl & X86_EFL_VM)
3566 {
3567 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3568 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3569 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3570 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3571 }
3572 }
3573 /*
3574 * Same privilege, no stack change and smaller stack frame.
3575 */
3576 else
3577 {
3578 uint64_t uNewRsp;
3579 RTPTRUNION uStackFrame;
3580 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
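            /* Sketch of the size: with a 32-bit gate and an error code this is
               8 << 1 = 16 bytes, matching the uErr/EIP/CS/EFLAGS dwords stored below;
               without an error code it is 6 << 1 = 12 bytes, and a 16-bit gate halves both. */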
3581 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3582 if (rcStrict != VINF_SUCCESS)
3583 return rcStrict;
3584 void * const pvStackFrame = uStackFrame.pv;
3585
3586 if (f32BitGate)
3587 {
3588 if (fFlags & IEM_XCPT_FLAGS_ERR)
3589 *uStackFrame.pu32++ = uErr;
3590 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3591 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3592 uStackFrame.pu32[2] = fEfl;
3593 }
3594 else
3595 {
3596 if (fFlags & IEM_XCPT_FLAGS_ERR)
3597 *uStackFrame.pu16++ = uErr;
3598 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3599 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3600 uStackFrame.pu16[2] = fEfl;
3601 }
3602 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3603 if (rcStrict != VINF_SUCCESS)
3604 return rcStrict;
3605
3606 /* Mark the CS selector as 'accessed'. */
3607 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3608 {
3609 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3610 if (rcStrict != VINF_SUCCESS)
3611 return rcStrict;
3612 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3613 }
3614
3615 /*
3616 * Start committing the register changes (joins with the other branch).
3617 */
3618 pCtx->rsp = uNewRsp;
3619 }
3620
3621 /* ... register committing continues. */
3622 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3623 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3624 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3625 pCtx->cs.u32Limit = cbLimitCS;
3626 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3627 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3628
3629 pCtx->rip = uNewEip;
3630 fEfl &= ~fEflToClear;
3631 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3632
3633 if (fFlags & IEM_XCPT_FLAGS_CR2)
3634 pCtx->cr2 = uCr2;
3635
3636 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3637 iemRaiseXcptAdjustState(pCtx, u8Vector);
3638
3639 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3640}
3641
3642
3643/**
3644 * Implements exceptions and interrupts for long mode.
3645 *
3646 * @returns VBox strict status code.
3647 * @param pIemCpu The IEM per CPU instance data.
3648 * @param pCtx The CPU context.
3649 * @param cbInstr The number of bytes to offset rIP by in the return
3650 * address.
3651 * @param u8Vector The interrupt / exception vector number.
3652 * @param fFlags The flags.
3653 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3654 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3655 */
3656IEM_STATIC VBOXSTRICTRC
3657iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3658 PCPUMCTX pCtx,
3659 uint8_t cbInstr,
3660 uint8_t u8Vector,
3661 uint32_t fFlags,
3662 uint16_t uErr,
3663 uint64_t uCr2)
3664{
3665 /*
3666 * Read the IDT entry.
3667 */
3668 uint16_t offIdt = (uint16_t)u8Vector << 4;
3669 if (pCtx->idtr.cbIdt < offIdt + 7)
3670 {
3671 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3672 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3673 }
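    /* Long-mode gates are 16 bytes, hence the shift by 4 above: vector 0x0e, for
       instance, sits at IDT offset 0xe0 and its two 8-byte halves are fetched
       separately below. */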
3674 X86DESC64 Idte;
3675 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3676 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3677 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3678 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3679 return rcStrict;
3680 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3681 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3682 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3683
3684 /*
3685 * Check the descriptor type, DPL and such.
3686 * ASSUMES this is done in the same order as described for call-gate calls.
3687 */
3688 if (Idte.Gate.u1DescType)
3689 {
3690 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3691 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3692 }
3693 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3694 switch (Idte.Gate.u4Type)
3695 {
3696 case AMD64_SEL_TYPE_SYS_INT_GATE:
3697 fEflToClear |= X86_EFL_IF;
3698 break;
3699 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3700 break;
3701
3702 default:
3703 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3704 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3705 }
3706
3707 /* Check DPL against CPL if applicable. */
3708 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3709 {
3710 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3711 {
3712 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3713 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3714 }
3715 }
3716
3717 /* Is it there? */
3718 if (!Idte.Gate.u1Present)
3719 {
3720 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3721 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3722 }
3723
3724 /* A null CS is bad. */
3725 RTSEL NewCS = Idte.Gate.u16Sel;
3726 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3727 {
3728 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3729 return iemRaiseGeneralProtectionFault0(pIemCpu);
3730 }
3731
3732 /* Fetch the descriptor for the new CS. */
3733 IEMSELDESC DescCS;
3734 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3735 if (rcStrict != VINF_SUCCESS)
3736 {
3737 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3738 return rcStrict;
3739 }
3740
3741 /* Must be a 64-bit code segment. */
3742 if (!DescCS.Long.Gen.u1DescType)
3743 {
3744 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3745 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3746 }
3747 if ( !DescCS.Long.Gen.u1Long
3748 || DescCS.Long.Gen.u1DefBig
3749 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3750 {
3751 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3752 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3753 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3754 }
3755
3756 /* Don't allow lowering the privilege level. For non-conforming CS
3757 selectors, the CS.DPL sets the privilege level the trap/interrupt
3758 handler runs at. For conforming CS selectors, the CPL remains
3759 unchanged, but the CS.DPL must be <= CPL. */
3760 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3761 * when CPU in Ring-0. Result \#GP? */
3762 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3763 {
3764 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3765 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3766 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3767 }
3768
3769
3770 /* Make sure the selector is present. */
3771 if (!DescCS.Legacy.Gen.u1Present)
3772 {
3773 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3774 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3775 }
3776
3777 /* Check that the new RIP is canonical. */
3778 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3779 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3780 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3781 if (!IEM_IS_CANONICAL(uNewRip))
3782 {
3783 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3784 return iemRaiseGeneralProtectionFault0(pIemCpu);
3785 }
3786
3787 /*
3788 * If the privilege level changes or if the IST isn't zero, we need to get
3789 * a new stack from the TSS.
3790 */
3791 uint64_t uNewRsp;
3792 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3793 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3794 if ( uNewCpl != pIemCpu->uCpl
3795 || Idte.Gate.u3IST != 0)
3796 {
3797 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3798 if (rcStrict != VINF_SUCCESS)
3799 return rcStrict;
3800 }
3801 else
3802 uNewRsp = pCtx->rsp;
3803 uNewRsp &= ~(uint64_t)0xf;
3804
3805 /*
3806 * Calc the flag image to push.
3807 */
3808 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3809 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3810 fEfl &= ~X86_EFL_RF;
3811 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3812 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3813
3814 /*
3815 * Start making changes.
3816 */
3817
3818 /* Create the stack frame. */
3819 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
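    /* Sketch of the size: five qwords (RIP, CS, RFLAGS, RSP, SS) make 40 bytes, or
       48 with an error code; together with the 16-byte alignment of uNewRsp above
       this mirrors what a 64-bit interrupt/trap gate pushes. */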
3820 RTPTRUNION uStackFrame;
3821 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3822 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3823 if (rcStrict != VINF_SUCCESS)
3824 return rcStrict;
3825 void * const pvStackFrame = uStackFrame.pv;
3826
3827 if (fFlags & IEM_XCPT_FLAGS_ERR)
3828 *uStackFrame.pu64++ = uErr;
3829 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3830 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3831 uStackFrame.pu64[2] = fEfl;
3832 uStackFrame.pu64[3] = pCtx->rsp;
3833 uStackFrame.pu64[4] = pCtx->ss.Sel;
3834 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3835 if (rcStrict != VINF_SUCCESS)
3836 return rcStrict;
3837
3838 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3839 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3840 * after pushing the stack frame? (Write protect the gdt + stack to
3841 * find out.) */
3842 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3843 {
3844 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3845 if (rcStrict != VINF_SUCCESS)
3846 return rcStrict;
3847 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3848 }
3849
3850 /*
3851 * Start committing the register changes.
3852 */
3853 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3854 * hidden registers when interrupting 32-bit or 16-bit code! */
3855 if (uNewCpl != pIemCpu->uCpl)
3856 {
3857 pCtx->ss.Sel = 0 | uNewCpl;
3858 pCtx->ss.ValidSel = 0 | uNewCpl;
3859 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3860 pCtx->ss.u32Limit = UINT32_MAX;
3861 pCtx->ss.u64Base = 0;
3862 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3863 }
3864 pCtx->rsp = uNewRsp - cbStackFrame;
3865 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3866 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3867 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3868 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3869 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3870 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3871 pCtx->rip = uNewRip;
3872 pIemCpu->uCpl = uNewCpl;
3873
3874 fEfl &= ~fEflToClear;
3875 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3876
3877 if (fFlags & IEM_XCPT_FLAGS_CR2)
3878 pCtx->cr2 = uCr2;
3879
3880 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3881 iemRaiseXcptAdjustState(pCtx, u8Vector);
3882
3883 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3884}
3885
3886
3887/**
3888 * Implements exceptions and interrupts.
3889 *
3890 * All exceptions and interrupts go through this function!
3891 *
3892 * @returns VBox strict status code.
3893 * @param pIemCpu The IEM per CPU instance data.
3894 * @param cbInstr The number of bytes to offset rIP by in the return
3895 * address.
3896 * @param u8Vector The interrupt / exception vector number.
3897 * @param fFlags The flags.
3898 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3899 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3900 */
3901DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3902iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3903 uint8_t cbInstr,
3904 uint8_t u8Vector,
3905 uint32_t fFlags,
3906 uint16_t uErr,
3907 uint64_t uCr2)
3908{
3909 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3910#ifdef IN_RING0
3911 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3912 AssertRCReturn(rc, rc);
3913#endif
3914
3915 /*
3916 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3917 */
3918 if ( pCtx->eflags.Bits.u1VM
3919 && pCtx->eflags.Bits.u2IOPL != 3
3920 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3921 && (pCtx->cr0 & X86_CR0_PE) )
3922 {
3923 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3924 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3925 u8Vector = X86_XCPT_GP;
3926 uErr = 0;
3927 }
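    /* Example of the effect: with EFLAGS.VM=1 and IOPL=2, a guest "int 21h" is not
       dispatched through the IDT at all but converted right here into #GP(0), which
       is what the V86 IOPL rules for software interrupts call for. */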
3928#ifdef DBGFTRACE_ENABLED
3929 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3930 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3931 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3932#endif
3933
3934 /*
3935 * Do recursion accounting.
3936 */
3937 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3938 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3939 if (pIemCpu->cXcptRecursions == 0)
3940 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3941 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3942 else
3943 {
3944 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3945 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3946
3947 /** @todo double and triple faults. */
3948 if (pIemCpu->cXcptRecursions >= 3)
3949 {
3950#ifdef DEBUG_bird
3951 AssertFailed();
3952#endif
3953 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3954 }
3955
3956 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3957 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3958 {
3959 ....
3960 } */
3961 }
3962 pIemCpu->cXcptRecursions++;
3963 pIemCpu->uCurXcpt = u8Vector;
3964 pIemCpu->fCurXcpt = fFlags;
3965
3966 /*
3967 * Extensive logging.
3968 */
3969#if defined(LOG_ENABLED) && defined(IN_RING3)
3970 if (LogIs3Enabled())
3971 {
3972 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3973 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3974 char szRegs[4096];
3975 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3976 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3977 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3978 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3979 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3980 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3981 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3982 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3983 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3984 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3985 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3986 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3987 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3988 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3989 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3990 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3991 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3992 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3993 " efer=%016VR{efer}\n"
3994 " pat=%016VR{pat}\n"
3995 " sf_mask=%016VR{sf_mask}\n"
3996 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3997 " lstar=%016VR{lstar}\n"
3998 " star=%016VR{star} cstar=%016VR{cstar}\n"
3999 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4000 );
4001
4002 char szInstr[256];
4003 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4004 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4005 szInstr, sizeof(szInstr), NULL);
4006 Log3(("%s%s\n", szRegs, szInstr));
4007 }
4008#endif /* LOG_ENABLED */
4009
4010 /*
4011 * Call the mode specific worker function.
4012 */
4013 VBOXSTRICTRC rcStrict;
4014 if (!(pCtx->cr0 & X86_CR0_PE))
4015 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4016 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4017 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4018 else
4019 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4020
4021 /*
4022 * Unwind.
4023 */
4024 pIemCpu->cXcptRecursions--;
4025 pIemCpu->uCurXcpt = uPrevXcpt;
4026 pIemCpu->fCurXcpt = fPrevXcpt;
4027 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4028 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4029 return rcStrict;
4030}
4031
4032
4033/** \#DE - 00. */
4034DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4035{
4036 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4037}
4038
4039
4040/** \#DB - 01.
4041 * @note This automatically clears DR7.GD. */
4042DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4043{
4044 /** @todo set/clear RF. */
4045 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4046 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4047}
4048
4049
4050/** \#UD - 06. */
4051DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4052{
4053 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4054}
4055
4056
4057/** \#NM - 07. */
4058DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4059{
4060 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4061}
4062
4063
4064/** \#TS(err) - 0a. */
4065DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4066{
4067 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4068}
4069
4070
4071/** \#TS(tr) - 0a. */
4072DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4073{
4074 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4075 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4076}
4077
4078
4079/** \#TS(0) - 0a. */
4080DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4081{
4082 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4083 0, 0);
4084}
4085
4086
4087/** \#TS(err) - 0a. */
4088DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4089{
4090 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4091 uSel & X86_SEL_MASK_OFF_RPL, 0);
4092}
4093
4094
4095/** \#NP(err) - 0b. */
4096DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4097{
4098 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4099}
4100
4101
4102/** \#NP(seg) - 0b. */
4103DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4104{
4105 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4106 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4107}
4108
4109
4110/** \#NP(sel) - 0b. */
4111DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4112{
4113 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4114 uSel & ~X86_SEL_RPL, 0);
4115}
4116
4117
4118/** \#SS(seg) - 0c. */
4119DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4120{
4121 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4122 uSel & ~X86_SEL_RPL, 0);
4123}
4124
4125
4126/** \#SS(err) - 0c. */
4127DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4128{
4129 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4130}
4131
4132
4133/** \#GP(n) - 0d. */
4134DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4135{
4136 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4137}
4138
4139
4140/** \#GP(0) - 0d. */
4141DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4142{
4143 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4144}
4145
4146
4147/** \#GP(sel) - 0d. */
4148DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4149{
4150 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4151 Sel & ~X86_SEL_RPL, 0);
4152}
4153
4154
4155/** \#GP(0) - 0d. */
4156DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4157{
4158 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4159}
4160
4161
4162/** \#GP(sel) - 0d. */
4163DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4164{
4165 NOREF(iSegReg); NOREF(fAccess);
4166 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4167 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4168}
4169
4170
4171/** \#GP(sel) - 0d. */
4172DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4173{
4174 NOREF(Sel);
4175 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4176}
4177
4178
4179/** \#GP(sel) - 0d. */
4180DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4181{
4182 NOREF(iSegReg); NOREF(fAccess);
4183 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4184}
4185
4186
4187/** \#PF(n) - 0e. */
4188DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4189{
4190 uint16_t uErr;
4191 switch (rc)
4192 {
4193 case VERR_PAGE_NOT_PRESENT:
4194 case VERR_PAGE_TABLE_NOT_PRESENT:
4195 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4196 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4197 uErr = 0;
4198 break;
4199
4200 default:
4201 AssertMsgFailed(("%Rrc\n", rc));
4202 case VERR_ACCESS_DENIED:
4203 uErr = X86_TRAP_PF_P;
4204 break;
4205
4206 /** @todo reserved */
4207 }
4208
4209 if (pIemCpu->uCpl == 3)
4210 uErr |= X86_TRAP_PF_US;
4211
4212 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4213 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4214 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4215 uErr |= X86_TRAP_PF_ID;
4216
4217#if 0 /* This is so much nonsense, really. Why was it done like that? */
4218 /* Note! RW access callers reporting a WRITE protection fault, will clear
4219 the READ flag before calling. So, read-modify-write accesses (RW)
4220 can safely be reported as READ faults. */
4221 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4222 uErr |= X86_TRAP_PF_RW;
4223#else
4224 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4225 {
4226 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4227 uErr |= X86_TRAP_PF_RW;
4228 }
4229#endif
4230
4231 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4232 uErr, GCPtrWhere);
4233}
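/* A worked sketch of the error-code assembly in iemRaisePageFault above, assuming the
   architectural #PF error-code bits (P=1, W/R=2, U/S=4, I/D=16): a ring-3 write that
   comes back with VERR_ACCESS_DENIED yields X86_TRAP_PF_P | X86_TRAP_PF_US |
   X86_TRAP_PF_RW = 7, while a supervisor instruction fetch from a not-present page
   with CR4.PAE and EFER.NXE set yields just X86_TRAP_PF_ID. */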
4234
4235
4236/** \#MF(0) - 10. */
4237DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4238{
4239 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4240}
4241
4242
4243/** \#AC(0) - 11. */
4244DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4245{
4246 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4247}
4248
4249
4250/**
4251 * Macro for calling iemCImplRaiseDivideError().
4252 *
4253 * This enables us to add/remove arguments and force different levels of
4254 * inlining as we wish.
4255 *
4256 * @return Strict VBox status code.
4257 */
4258#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4259IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4260{
4261 NOREF(cbInstr);
4262 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4263}
4264
4265
4266/**
4267 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4268 *
4269 * This enables us to add/remove arguments and force different levels of
4270 * inlining as we wish.
4271 *
4272 * @return Strict VBox status code.
4273 */
4274#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4275IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4276{
4277 NOREF(cbInstr);
4278 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4279}
4280
4281
4282/**
4283 * Macro for calling iemCImplRaiseInvalidOpcode().
4284 *
4285 * This enables us to add/remove arguments and force different levels of
4286 * inlining as we wish.
4287 *
4288 * @return Strict VBox status code.
4289 */
4290#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4291IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4292{
4293 NOREF(cbInstr);
4294 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4295}
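/*
 * Usage sketch (hypothetical, mirroring the FNIEMOP_UD_STUB macro further down): an
 * opcode decoder that needs to bail out with #UD simply defers to the C implementation
 * above. The function name below is made up for illustration only.
 */
#if 0
FNIEMOP_DEF(iemOp_IllustrativeInvalidOpcode)
{
    return IEMOP_RAISE_INVALID_OPCODE();
}
#endif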
4296
4297
4298/** @} */
4299
4300
4301/*
4302 *
4303 * Helper routines.
4304 * Helper routines.
4305 * Helper routines.
4306 *
4307 */
4308
4309/**
4310 * Recalculates the effective operand size.
4311 *
4312 * @param pIemCpu The IEM state.
4313 */
4314IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4315{
4316 switch (pIemCpu->enmCpuMode)
4317 {
4318 case IEMMODE_16BIT:
4319 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4320 break;
4321 case IEMMODE_32BIT:
4322 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4323 break;
4324 case IEMMODE_64BIT:
4325 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4326 {
4327 case 0:
4328 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4329 break;
4330 case IEM_OP_PRF_SIZE_OP:
4331 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4332 break;
4333 case IEM_OP_PRF_SIZE_REX_W:
4334 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4335 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4336 break;
4337 }
4338 break;
4339 default:
4340 AssertFailed();
4341 }
4342}
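/* Examples of the 64-bit rules above: a lone 0x66 prefix selects IEMMODE_16BIT,
   REX.W selects IEMMODE_64BIT even when 0x66 is also present, and with neither
   prefix the instruction's default size applies (normally 32-bit, 64-bit for the
   few instructions that go through iemRecalEffOpSize64Default below). */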
4343
4344
4345/**
4346 * Sets the default operand size to 64-bit and recalculates the effective
4347 * operand size.
4348 *
4349 * @param pIemCpu The IEM state.
4350 */
4351IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4352{
4353 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4354 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4355 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4356 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4357 else
4358 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4359}
4360
4361
4362/*
4363 *
4364 * Common opcode decoders.
4365 * Common opcode decoders.
4366 * Common opcode decoders.
4367 *
4368 */
4369//#include <iprt/mem.h>
4370
4371/**
4372 * Used to add extra details about a stub case.
4373 * @param pIemCpu The IEM per CPU state.
4374 */
4375IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4376{
4377#if defined(LOG_ENABLED) && defined(IN_RING3)
4378 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4379 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4380 char szRegs[4096];
4381 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4382 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4383 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4384 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4385 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4386 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4387 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4388 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4389 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4390 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4391 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4392 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4393 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4394 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4395 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4396 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4397 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4398 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4399 " efer=%016VR{efer}\n"
4400 " pat=%016VR{pat}\n"
4401 " sf_mask=%016VR{sf_mask}\n"
4402 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4403 " lstar=%016VR{lstar}\n"
4404 " star=%016VR{star} cstar=%016VR{cstar}\n"
4405 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4406 );
4407
4408 char szInstr[256];
4409 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4410 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4411 szInstr, sizeof(szInstr), NULL);
4412
4413 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4414#else
4415 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4416#endif
4417}
4418
4419/**
4420 * Complains about a stub.
4421 *
4422 * Providing two versions of this macro, one for daily use and one for use when
4423 * working on IEM.
4424 */
4425#if 0
4426# define IEMOP_BITCH_ABOUT_STUB() \
4427 do { \
4428 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4429 iemOpStubMsg2(pIemCpu); \
4430 RTAssertPanic(); \
4431 } while (0)
4432#else
4433# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4434#endif
4435
4436/** Stubs an opcode. */
4437#define FNIEMOP_STUB(a_Name) \
4438 FNIEMOP_DEF(a_Name) \
4439 { \
4440 IEMOP_BITCH_ABOUT_STUB(); \
4441 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4442 } \
4443 typedef int ignore_semicolon
4444
4445/** Stubs an opcode. */
4446#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4447 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4448 { \
4449 IEMOP_BITCH_ABOUT_STUB(); \
4450 NOREF(a_Name0); \
4451 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4452 } \
4453 typedef int ignore_semicolon
4454
4455/** Stubs an opcode which currently should raise \#UD. */
4456#define FNIEMOP_UD_STUB(a_Name) \
4457 FNIEMOP_DEF(a_Name) \
4458 { \
4459 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4460 return IEMOP_RAISE_INVALID_OPCODE(); \
4461 } \
4462 typedef int ignore_semicolon
4463
4464/** Stubs an opcode which currently should raise \#UD. */
4465#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4466 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4467 { \
4468 NOREF(a_Name0); \
4469 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4470 return IEMOP_RAISE_INVALID_OPCODE(); \
4471 } \
4472 typedef int ignore_semicolon
4473
4474
4475
4476/** @name Register Access.
4477 * @{
4478 */
4479
4480/**
4481 * Gets a reference (pointer) to the specified hidden segment register.
4482 *
4483 * @returns Hidden register reference.
4484 * @param pIemCpu The per CPU data.
4485 * @param iSegReg The segment register.
4486 */
4487IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4488{
4489 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4490 PCPUMSELREG pSReg;
4491 switch (iSegReg)
4492 {
4493 case X86_SREG_ES: pSReg = &pCtx->es; break;
4494 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4495 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4496 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4497 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4498 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4499 default:
4500 AssertFailedReturn(NULL);
4501 }
4502#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4503 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4504 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4505#else
4506 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4507#endif
4508 return pSReg;
4509}
4510
4511
4512/**
4513 * Gets a reference (pointer) to the specified segment register (the selector
4514 * value).
4515 *
4516 * @returns Pointer to the selector variable.
4517 * @param pIemCpu The per CPU data.
4518 * @param iSegReg The segment register.
4519 */
4520IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4521{
4522 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4523 switch (iSegReg)
4524 {
4525 case X86_SREG_ES: return &pCtx->es.Sel;
4526 case X86_SREG_CS: return &pCtx->cs.Sel;
4527 case X86_SREG_SS: return &pCtx->ss.Sel;
4528 case X86_SREG_DS: return &pCtx->ds.Sel;
4529 case X86_SREG_FS: return &pCtx->fs.Sel;
4530 case X86_SREG_GS: return &pCtx->gs.Sel;
4531 }
4532 AssertFailedReturn(NULL);
4533}
4534
4535
4536/**
4537 * Fetches the selector value of a segment register.
4538 *
4539 * @returns The selector value.
4540 * @param pIemCpu The per CPU data.
4541 * @param iSegReg The segment register.
4542 */
4543IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4544{
4545 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4546 switch (iSegReg)
4547 {
4548 case X86_SREG_ES: return pCtx->es.Sel;
4549 case X86_SREG_CS: return pCtx->cs.Sel;
4550 case X86_SREG_SS: return pCtx->ss.Sel;
4551 case X86_SREG_DS: return pCtx->ds.Sel;
4552 case X86_SREG_FS: return pCtx->fs.Sel;
4553 case X86_SREG_GS: return pCtx->gs.Sel;
4554 }
4555 AssertFailedReturn(0xffff);
4556}
4557
4558
4559/**
4560 * Gets a reference (pointer) to the specified general register.
4561 *
4562 * @returns Register reference.
4563 * @param pIemCpu The per CPU data.
4564 * @param iReg The general register.
4565 */
4566IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4567{
4568 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4569 switch (iReg)
4570 {
4571 case X86_GREG_xAX: return &pCtx->rax;
4572 case X86_GREG_xCX: return &pCtx->rcx;
4573 case X86_GREG_xDX: return &pCtx->rdx;
4574 case X86_GREG_xBX: return &pCtx->rbx;
4575 case X86_GREG_xSP: return &pCtx->rsp;
4576 case X86_GREG_xBP: return &pCtx->rbp;
4577 case X86_GREG_xSI: return &pCtx->rsi;
4578 case X86_GREG_xDI: return &pCtx->rdi;
4579 case X86_GREG_x8: return &pCtx->r8;
4580 case X86_GREG_x9: return &pCtx->r9;
4581 case X86_GREG_x10: return &pCtx->r10;
4582 case X86_GREG_x11: return &pCtx->r11;
4583 case X86_GREG_x12: return &pCtx->r12;
4584 case X86_GREG_x13: return &pCtx->r13;
4585 case X86_GREG_x14: return &pCtx->r14;
4586 case X86_GREG_x15: return &pCtx->r15;
4587 }
4588 AssertFailedReturn(NULL);
4589}
4590
4591
4592/**
4593 * Gets a reference (pointer) to the specified 8-bit general register.
4594 *
4595 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4596 *
4597 * @returns Register reference.
4598 * @param pIemCpu The per CPU data.
4599 * @param iReg The register.
4600 */
4601IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4602{
4603 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4604 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4605
4606 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4607 if (iReg >= 4)
4608 pu8Reg++;
4609 return pu8Reg;
4610}
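/* Example of the high-byte mapping above: without any REX prefix, iReg values 4..7
   encode AH/CH/DH/BH, i.e. byte 1 of rAX/rCX/rDX/rBX - hence the '& 3' and the
   pointer bump; with a REX prefix the same encodings mean SPL/BPL/SIL/DIL and go
   straight through iemGRegRef. */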
4611
4612
4613/**
4614 * Fetches the value of an 8-bit general register.
4615 *
4616 * @returns The register value.
4617 * @param pIemCpu The per CPU data.
4618 * @param iReg The register.
4619 */
4620IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4621{
4622 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4623 return *pbSrc;
4624}
4625
4626
4627/**
4628 * Fetches the value of a 16-bit general register.
4629 *
4630 * @returns The register value.
4631 * @param pIemCpu The per CPU data.
4632 * @param iReg The register.
4633 */
4634IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4635{
4636 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4637}
4638
4639
4640/**
4641 * Fetches the value of a 32-bit general register.
4642 *
4643 * @returns The register value.
4644 * @param pIemCpu The per CPU data.
4645 * @param iReg The register.
4646 */
4647IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4648{
4649 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4650}
4651
4652
4653/**
4654 * Fetches the value of a 64-bit general register.
4655 *
4656 * @returns The register value.
4657 * @param pIemCpu The per CPU data.
4658 * @param iReg The register.
4659 */
4660IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4661{
4662 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4663}
4664
4665
4666/**
4667 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4668 *
4669 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4670 * segment limit.
4671 *
4672 * @param pIemCpu The per CPU data.
4673 * @param offNextInstr The offset of the next instruction.
4674 */
4675IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4676{
4677 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4678 switch (pIemCpu->enmEffOpSize)
4679 {
4680 case IEMMODE_16BIT:
4681 {
4682 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4683 if ( uNewIp > pCtx->cs.u32Limit
4684 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4685 return iemRaiseGeneralProtectionFault0(pIemCpu);
4686 pCtx->rip = uNewIp;
4687 break;
4688 }
4689
4690 case IEMMODE_32BIT:
4691 {
4692 Assert(pCtx->rip <= UINT32_MAX);
4693 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4694
4695 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4696 if (uNewEip > pCtx->cs.u32Limit)
4697 return iemRaiseGeneralProtectionFault0(pIemCpu);
4698 pCtx->rip = uNewEip;
4699 break;
4700 }
4701
4702 case IEMMODE_64BIT:
4703 {
4704 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4705
4706 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4707 if (!IEM_IS_CANONICAL(uNewRip))
4708 return iemRaiseGeneralProtectionFault0(pIemCpu);
4709 pCtx->rip = uNewRip;
4710 break;
4711 }
4712
4713 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4714 }
4715
4716 pCtx->eflags.Bits.u1RF = 0;
4717 return VINF_SUCCESS;
4718}
4719
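/*
 * A small standalone sketch (not IEM code; the limit parameter is only an
 * example input) of the 16-bit case above: the target is computed in a
 * uint16_t so it wraps at 64K exactly like IP does, and only the wrapped
 * value is checked against the CS limit.
 */
#if 0
# include <stdint.h>

static int exampleRelJumpS8Ip(uint16_t uIp, uint8_t cbInstr, int8_t offRel, uint32_t cbCsLimit, uint16_t *puNewIp)
{
    uint16_t const uNewIp = (uint16_t)(uIp + offRel + cbInstr);    /* wraps modulo 64K */
    if (uNewIp > cbCsLimit)
        return -1;                          /* the caller would raise #GP(0) here */
    *puNewIp = uNewIp;
    return 0;
}
#endif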
4720
4721/**
4722 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4723 *
4724 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4725 * segment limit.
4726 *
4727 * @returns Strict VBox status code.
4728 * @param pIemCpu The per CPU data.
4729 * @param offNextInstr The offset of the next instruction.
4730 */
4731IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4732{
4733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4734 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4735
4736 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4737 if ( uNewIp > pCtx->cs.u32Limit
4738 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4739 return iemRaiseGeneralProtectionFault0(pIemCpu);
4740 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4741 pCtx->rip = uNewIp;
4742 pCtx->eflags.Bits.u1RF = 0;
4743
4744 return VINF_SUCCESS;
4745}
4746
4747
4748/**
4749 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4750 *
4751 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4752 * segment limit.
4753 *
4754 * @returns Strict VBox status code.
4755 * @param pIemCpu The per CPU data.
4756 * @param offNextInstr The offset of the next instruction.
4757 */
4758IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4759{
4760 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4761 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4762
4763 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4764 {
4765 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4766
4767 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4768 if (uNewEip > pCtx->cs.u32Limit)
4769 return iemRaiseGeneralProtectionFault0(pIemCpu);
4770 pCtx->rip = uNewEip;
4771 }
4772 else
4773 {
4774 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4775
4776 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4777 if (!IEM_IS_CANONICAL(uNewRip))
4778 return iemRaiseGeneralProtectionFault0(pIemCpu);
4779 pCtx->rip = uNewRip;
4780 }
4781 pCtx->eflags.Bits.u1RF = 0;
4782 return VINF_SUCCESS;
4783}
4784
4785
4786/**
4787 * Performs a near jump to the specified address.
4788 *
4789 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4790 * segment limit.
4791 *
4792 * @param pIemCpu The per CPU data.
4793 * @param uNewRip The new RIP value.
4794 */
4795IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4796{
4797 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4798 switch (pIemCpu->enmEffOpSize)
4799 {
4800 case IEMMODE_16BIT:
4801 {
4802 Assert(uNewRip <= UINT16_MAX);
4803 if ( uNewRip > pCtx->cs.u32Limit
4804 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4805 return iemRaiseGeneralProtectionFault0(pIemCpu);
4806 /** @todo Test 16-bit jump in 64-bit mode. */
4807 pCtx->rip = uNewRip;
4808 break;
4809 }
4810
4811 case IEMMODE_32BIT:
4812 {
4813 Assert(uNewRip <= UINT32_MAX);
4814 Assert(pCtx->rip <= UINT32_MAX);
4815 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4816
4817 if (uNewRip > pCtx->cs.u32Limit)
4818 return iemRaiseGeneralProtectionFault0(pIemCpu);
4819 pCtx->rip = uNewRip;
4820 break;
4821 }
4822
4823 case IEMMODE_64BIT:
4824 {
4825 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4826
4827 if (!IEM_IS_CANONICAL(uNewRip))
4828 return iemRaiseGeneralProtectionFault0(pIemCpu);
4829 pCtx->rip = uNewRip;
4830 break;
4831 }
4832
4833 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4834 }
4835
4836 pCtx->eflags.Bits.u1RF = 0;
4837 return VINF_SUCCESS;
4838}
4839
4840
4841/**
4842 * Gets the address of the top of the stack.
4843 *
4844 * @param pIemCpu The per CPU data.
4845 * @param pCtx The CPU context which SP/ESP/RSP should be
4846 * read.
4847 */
4848DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4849{
4850 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4851 return pCtx->rsp;
4852 if (pCtx->ss.Attr.n.u1DefBig)
4853 return pCtx->esp;
4854 return pCtx->sp;
4855}
4856
4857
4858/**
4859 * Updates the RIP/EIP/IP to point to the next instruction.
4860 *
4861 * This function leaves the EFLAGS.RF flag alone.
4862 *
4863 * @param pIemCpu The per CPU data.
4864 * @param cbInstr The number of bytes to add.
4865 */
4866IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4867{
4868 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4869 switch (pIemCpu->enmCpuMode)
4870 {
4871 case IEMMODE_16BIT:
4872 Assert(pCtx->rip <= UINT16_MAX);
4873 pCtx->eip += cbInstr;
4874 pCtx->eip &= UINT32_C(0xffff);
4875 break;
4876
4877 case IEMMODE_32BIT:
4878 pCtx->eip += cbInstr;
4879 Assert(pCtx->rip <= UINT32_MAX);
4880 break;
4881
4882 case IEMMODE_64BIT:
4883 pCtx->rip += cbInstr;
4884 break;
4885 default: AssertFailed();
4886 }
4887}
4888
4889
4890#if 0
4891/**
4892 * Updates the RIP/EIP/IP to point to the next instruction.
4893 *
4894 * @param pIemCpu The per CPU data.
4895 */
4896IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4897{
4898 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4899}
4900#endif
4901
4902
4903
4904/**
4905 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4906 *
4907 * @param pIemCpu The per CPU data.
4908 * @param cbInstr The number of bytes to add.
4909 */
4910IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4911{
4912 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4913
4914 pCtx->eflags.Bits.u1RF = 0;
4915
4916 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4917 switch (pIemCpu->enmCpuMode)
4918 {
4919 /** @todo investigate if EIP or RIP is really incremented. */
4920 case IEMMODE_16BIT:
4921 case IEMMODE_32BIT:
4922 pCtx->eip += cbInstr;
4923 Assert(pCtx->rip <= UINT32_MAX);
4924 break;
4925
4926 case IEMMODE_64BIT:
4927 pCtx->rip += cbInstr;
4928 break;
4929 default: AssertFailed();
4930 }
4931}
4932
4933
4934/**
4935 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4936 *
4937 * @param pIemCpu The per CPU data.
4938 */
4939IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4940{
4941 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4942}
4943
4944
4945/**
4946 * Adds to the stack pointer.
4947 *
4948 * @param pIemCpu The per CPU data.
4949 * @param pCtx The CPU context which SP/ESP/RSP should be
4950 * updated.
4951 * @param cbToAdd The number of bytes to add.
4952 */
4953DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4954{
4955 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4956 pCtx->rsp += cbToAdd;
4957 else if (pCtx->ss.Attr.n.u1DefBig)
4958 pCtx->esp += cbToAdd;
4959 else
4960 pCtx->sp += cbToAdd;
4961}
4962
4963
4964/**
4965 * Subtracts from the stack pointer.
4966 *
4967 * @param pIemCpu The per CPU data.
4968 * @param pCtx The CPU context which SP/ESP/RSP should be
4969 * updated.
4970 * @param cbToSub The number of bytes to subtract.
4971 */
4972DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4973{
4974 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4975 pCtx->rsp -= cbToSub;
4976 else if (pCtx->ss.Attr.n.u1DefBig)
4977 pCtx->esp -= cbToSub;
4978 else
4979 pCtx->sp -= cbToSub;
4980}
4981
4982
4983/**
4984 * Adds to the temporary stack pointer.
4985 *
4986 * @param pIemCpu The per CPU data.
4987 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4988 * @param cbToAdd The number of bytes to add.
4989 * @param pCtx Where to get the current stack mode.
4990 */
4991DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4992{
4993 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4994 pTmpRsp->u += cbToAdd;
4995 else if (pCtx->ss.Attr.n.u1DefBig)
4996 pTmpRsp->DWords.dw0 += cbToAdd;
4997 else
4998 pTmpRsp->Words.w0 += cbToAdd;
4999}
5000
5001
5002/**
5003 * Subtracts from the temporary stack pointer.
5004 *
5005 * @param pIemCpu The per CPU data.
5006 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5007 * @param cbToSub The number of bytes to subtract.
5008 * @param pCtx Where to get the current stack mode.
5009 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5010 * expecting that.
5011 */
5012DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5013{
5014 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5015 pTmpRsp->u -= cbToSub;
5016 else if (pCtx->ss.Attr.n.u1DefBig)
5017 pTmpRsp->DWords.dw0 -= cbToSub;
5018 else
5019 pTmpRsp->Words.w0 -= cbToSub;
5020}
5021
5022
5023/**
5024 * Calculates the effective stack address for a push of the specified size as
5025 * well as the new RSP value (upper bits may be masked).
5026 *
5027 * @returns Effective stack address for the push.
5028 * @param pIemCpu The IEM per CPU data.
5029 * @param pCtx Where to get the current stack mode.
5030 * @param cbItem The size of the stack item to push.
5031 * @param puNewRsp Where to return the new RSP value.
5032 */
5033DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5034{
5035 RTUINT64U uTmpRsp;
5036 RTGCPTR GCPtrTop;
5037 uTmpRsp.u = pCtx->rsp;
5038
5039 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5040 GCPtrTop = uTmpRsp.u -= cbItem;
5041 else if (pCtx->ss.Attr.n.u1DefBig)
5042 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5043 else
5044 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5045 *puNewRsp = uTmpRsp.u;
5046 return GCPtrTop;
5047}
5048
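/*
 * A standalone sketch (little-endian layout assumed, EXAMPLEU64 standing in
 * for RTUINT64U) of the mode-dependent arithmetic above: the subtraction is
 * done on the full 64-bit value, the low dword or the low word, so in 32-bit
 * and 16-bit stacks the untouched upper bits of RSP are preserved.
 */
#if 0
# include <stdint.h>

typedef union EXAMPLEU64 { uint64_t u; uint32_t au32[2]; uint16_t au16[4]; } EXAMPLEU64;

static uint64_t exampleRspForPush(uint64_t uRsp, uint8_t cbItem, int f64Bit, int fSsDefBig, uint64_t *puNewRsp)
{
    EXAMPLEU64 uTmp;
    uint64_t   uTop;
    uTmp.u = uRsp;
    if (f64Bit)
        uTop = uTmp.u       -= cbItem;      /* full 64-bit RSP */
    else if (fSsDefBig)
        uTop = uTmp.au32[0] -= cbItem;      /* ESP only, upper half untouched */
    else
        uTop = uTmp.au16[0] -= cbItem;      /* SP only, bits 16-63 untouched */
    *puNewRsp = uTmp.u;
    return uTop;
}
#endif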
5049
5050/**
5051 * Gets the current stack pointer and calculates the value after a pop of the
5052 * specified size.
5053 *
5054 * @returns Current stack pointer.
5055 * @param pIemCpu The per CPU data.
5056 * @param pCtx Where to get the current stack mode.
5057 * @param cbItem The size of the stack item to pop.
5058 * @param puNewRsp Where to return the new RSP value.
5059 */
5060DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5061{
5062 RTUINT64U uTmpRsp;
5063 RTGCPTR GCPtrTop;
5064 uTmpRsp.u = pCtx->rsp;
5065
5066 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5067 {
5068 GCPtrTop = uTmpRsp.u;
5069 uTmpRsp.u += cbItem;
5070 }
5071 else if (pCtx->ss.Attr.n.u1DefBig)
5072 {
5073 GCPtrTop = uTmpRsp.DWords.dw0;
5074 uTmpRsp.DWords.dw0 += cbItem;
5075 }
5076 else
5077 {
5078 GCPtrTop = uTmpRsp.Words.w0;
5079 uTmpRsp.Words.w0 += cbItem;
5080 }
5081 *puNewRsp = uTmpRsp.u;
5082 return GCPtrTop;
5083}
5084
5085
5086/**
5087 * Calculates the effective stack address for a push of the specified size as
5088 * well as the new temporary RSP value (upper bits may be masked).
5089 *
5090 * @returns Effective stack address for the push.
5091 * @param pIemCpu The per CPU data.
5092 * @param pCtx Where to get the current stack mode.
5093 * @param pTmpRsp The temporary stack pointer. This is updated.
5094 * @param cbItem The size of the stack item to push.
5095 */
5096DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5097{
5098 RTGCPTR GCPtrTop;
5099
5100 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5101 GCPtrTop = pTmpRsp->u -= cbItem;
5102 else if (pCtx->ss.Attr.n.u1DefBig)
5103 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5104 else
5105 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5106 return GCPtrTop;
5107}
5108
5109
5110/**
5111 * Gets the effective stack address for a pop of the specified size and
5112 * calculates and updates the temporary RSP.
5113 *
5114 * @returns Current stack pointer.
5115 * @param pIemCpu The per CPU data.
5116 * @param pCtx Where to get the current stack mode.
5117 * @param pTmpRsp The temporary stack pointer. This is updated.
5118 * @param cbItem The size of the stack item to pop.
5119 */
5120DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5121{
5122 RTGCPTR GCPtrTop;
5123 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5124 {
5125 GCPtrTop = pTmpRsp->u;
5126 pTmpRsp->u += cbItem;
5127 }
5128 else if (pCtx->ss.Attr.n.u1DefBig)
5129 {
5130 GCPtrTop = pTmpRsp->DWords.dw0;
5131 pTmpRsp->DWords.dw0 += cbItem;
5132 }
5133 else
5134 {
5135 GCPtrTop = pTmpRsp->Words.w0;
5136 pTmpRsp->Words.w0 += cbItem;
5137 }
5138 return GCPtrTop;
5139}
5140
5141/** @} */
5142
5143
5144/** @name FPU access and helpers.
5145 *
5146 * @{
5147 */
5148
5149
5150/**
5151 * Hook for preparing to use the host FPU.
5152 *
5153 * This is necessary in ring-0 and raw-mode context.
5154 *
5155 * @param pIemCpu The IEM per CPU data.
5156 */
5157DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5158{
5159#ifdef IN_RING3
5160 NOREF(pIemCpu);
5161#else
5162/** @todo RZ: FIXME */
5163//# error "Implement me"
5164#endif
5165}
5166
5167
5168/**
5169 * Hook for preparing to use the host FPU for SSE instructions.
5170 *
5171 * This is necessary in ring-0 and raw-mode context.
5172 *
5173 * @param pIemCpu The IEM per CPU data.
5174 */
5175DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5176{
5177 iemFpuPrepareUsage(pIemCpu);
5178}
5179
5180
5181/**
5182 * Stores a QNaN value into a FPU register.
5183 *
5184 * @param pReg Pointer to the register.
5185 */
5186DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5187{
5188 pReg->au32[0] = UINT32_C(0x00000000);
5189 pReg->au32[1] = UINT32_C(0xc0000000);
5190 pReg->au16[4] = UINT16_C(0xffff);
5191}
5192
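/*
 * A standalone sketch (little-endian byte order assumed) of the 80-bit value
 * the helper above writes: a mantissa of 0xC000000000000000 and a
 * sign/exponent word of 0xFFFF, i.e. the x87 "real indefinite" QNaN.
 */
#if 0
# include <stdint.h>
# include <string.h>

static void exampleStoreQNan(uint8_t pabDst[10])
{
    uint64_t const uMantissa = UINT64_C(0xC000000000000000);   /* integer bit + top fraction bit */
    uint16_t const uSignExp  = UINT16_C(0xffff);               /* sign=1, exponent=0x7fff */
    memcpy(pabDst, &uMantissa, sizeof(uMantissa));
    memcpy(pabDst + sizeof(uMantissa), &uSignExp, sizeof(uSignExp));
}
#endif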
5193
5194/**
5195 * Updates the FOP, FPU.CS and FPUIP registers.
5196 *
5197 * @param pIemCpu The IEM per CPU data.
5198 * @param pCtx The CPU context.
5199 * @param pFpuCtx The FPU context.
5200 */
5201DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5202{
5203 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5204 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5205 /** @todo x87.CS and FPUIP need to be kept separately. */
5206 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5207 {
5208 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5209 * happens in real mode here based on the fnsave and fnstenv images. */
5210 pFpuCtx->CS = 0;
5211 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5212 }
5213 else
5214 {
5215 pFpuCtx->CS = pCtx->cs.Sel;
5216 pFpuCtx->FPUIP = pCtx->rip;
5217 }
5218}
5219
5220
5221/**
5222 * Updates the x87.DS and FPUDP registers.
5223 *
5224 * @param pIemCpu The IEM per CPU data.
5225 * @param pCtx The CPU context.
5226 * @param pFpuCtx The FPU context.
5227 * @param iEffSeg The effective segment register.
5228 * @param GCPtrEff The effective address relative to @a iEffSeg.
5229 */
5230DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5231{
5232 RTSEL sel;
5233 switch (iEffSeg)
5234 {
5235 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5236 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5237 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5238 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5239 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5240 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5241 default:
5242 AssertMsgFailed(("%d\n", iEffSeg));
5243 sel = pCtx->ds.Sel;
5244 }
5245 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5246 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5247 {
5248 pFpuCtx->DS = 0;
5249 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5250 }
5251 else
5252 {
5253 pFpuCtx->DS = sel;
5254 pFpuCtx->FPUDP = GCPtrEff;
5255 }
5256}
5257
5258
5259/**
5260 * Rotates the stack registers in the push direction.
5261 *
5262 * @param pFpuCtx The FPU context.
5263 * @remarks This is a complete waste of time, but fxsave stores the registers in
5264 * stack order.
5265 */
5266DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5267{
5268 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5269 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5270 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5271 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5272 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5273 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5274 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5275 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5276 pFpuCtx->aRegs[0].r80 = r80Tmp;
5277}
5278
5279
5280/**
5281 * Rotates the stack registers in the pop direction.
5282 *
5283 * @param pFpuCtx The FPU context.
5284 * @remarks This is a complete waste of time, but fxsave stores the registers in
5285 * stack order.
5286 */
5287DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5288{
5289 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5290 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5291 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5292 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5293 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5294 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5295 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5296 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5297 pFpuCtx->aRegs[7].r80 = r80Tmp;
5298}
5299
5300
5301/**
5302 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5303 * exception prevents it.
5304 *
5305 * @param pIemCpu The IEM per CPU data.
5306 * @param pResult The FPU operation result to push.
5307 * @param pFpuCtx The FPU context.
5308 */
5309IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5310{
5311 /* Update FSW and bail if there are pending exceptions afterwards. */
5312 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5313 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5314 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5315 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5316 {
5317 pFpuCtx->FSW = fFsw;
5318 return;
5319 }
5320
5321 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5322 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5323 {
5324 /* All is fine, push the actual value. */
5325 pFpuCtx->FTW |= RT_BIT(iNewTop);
5326 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5327 }
5328 else if (pFpuCtx->FCW & X86_FCW_IM)
5329 {
5330 /* Masked stack overflow, push QNaN. */
5331 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5332 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5333 }
5334 else
5335 {
5336 /* Raise stack overflow, don't push anything. */
5337 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5338 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5339 return;
5340 }
5341
5342 fFsw &= ~X86_FSW_TOP_MASK;
5343 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5344 pFpuCtx->FSW = fFsw;
5345
5346 iemFpuRotateStackPush(pFpuCtx);
5347}
5348
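/*
 * A standalone sketch of the TOP arithmetic used above: TOP occupies FSW bits
 * 11-13, and adding 7 to it modulo 8 is the same as decrementing it, which is
 * what a push does (the bit positions below are the architectural ones).
 */
#if 0
# include <stdint.h>

# define EXAMPLE_FSW_TOP_SHIFT 11
# define EXAMPLE_FSW_TOP_SMASK UINT16_C(0x0007)

static uint16_t exampleTopAfterPush(uint16_t fFsw)
{
    uint16_t const iTop = (uint16_t)((fFsw >> EXAMPLE_FSW_TOP_SHIFT) & EXAMPLE_FSW_TOP_SMASK);
    return (uint16_t)((iTop + 7) & EXAMPLE_FSW_TOP_SMASK);     /* e.g. 0 -> 7, 3 -> 2 */
}
#endif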
5349
5350/**
5351 * Stores a result in a FPU register and updates the FSW and FTW.
5352 *
5353 * @param pFpuCtx The FPU context.
5354 * @param pResult The result to store.
5355 * @param iStReg Which FPU register to store it in.
5356 */
5357IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5358{
5359 Assert(iStReg < 8);
5360 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5361 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5362 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5363 pFpuCtx->FTW |= RT_BIT(iReg);
5364 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5365}
5366
5367
5368/**
5369 * Only updates the FPU status word (FSW) with the result of the current
5370 * instruction.
5371 *
5372 * @param pFpuCtx The FPU context.
5373 * @param u16FSW The FSW output of the current instruction.
5374 */
5375IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5376{
5377 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5378 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5379}
5380
5381
5382/**
5383 * Pops one item off the FPU stack if no pending exception prevents it.
5384 *
5385 * @param pFpuCtx The FPU context.
5386 */
5387IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5388{
5389 /* Check pending exceptions. */
5390 uint16_t uFSW = pFpuCtx->FSW;
5391 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5392 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5393 return;
5394
5395 /* TOP--. */
5396 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5397 uFSW &= ~X86_FSW_TOP_MASK;
5398 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5399 pFpuCtx->FSW = uFSW;
5400
5401 /* Mark the previous ST0 as empty. */
5402 iOldTop >>= X86_FSW_TOP_SHIFT;
5403 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5404
5405 /* Rotate the registers. */
5406 iemFpuRotateStackPop(pFpuCtx);
5407}
5408
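/*
 * A standalone sketch of the in-place TOP update above: with TOP in FSW bits
 * 11-13, adding 9 within that field and masking is the same as adding 1
 * modulo 8 (9 & 7 == 1), so the stack shrinks by one entry on a pop.
 */
#if 0
# include <stdint.h>

# define EXAMPLE_FSW_TOP_SHIFT 11
# define EXAMPLE_FSW_TOP_MASK  UINT16_C(0x3800)

static uint16_t exampleFswTopAfterPop(uint16_t fFsw)
{
    uint16_t const iOldTop = (uint16_t)(fFsw & EXAMPLE_FSW_TOP_MASK);
    fFsw &= ~EXAMPLE_FSW_TOP_MASK;
    fFsw |= (uint16_t)((iOldTop + (UINT16_C(9) << EXAMPLE_FSW_TOP_SHIFT)) & EXAMPLE_FSW_TOP_MASK);
    return fFsw;
}
#endif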
5409
5410/**
5411 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5412 *
5413 * @param pIemCpu The IEM per CPU data.
5414 * @param pResult The FPU operation result to push.
5415 */
5416IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5417{
5418 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5419 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5420 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5421 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5422}
5423
5424
5425/**
5426 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5427 * and sets FPUDP and FPUDS.
5428 *
5429 * @param pIemCpu The IEM per CPU data.
5430 * @param pResult The FPU operation result to push.
5431 * @param iEffSeg The effective segment register.
5432 * @param GCPtrEff The effective address relative to @a iEffSeg.
5433 */
5434IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5435{
5436 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5437 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5438 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5439 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5440 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5441}
5442
5443
5444/**
5445 * Replace ST0 with the first value and push the second onto the FPU stack,
5446 * unless a pending exception prevents it.
5447 *
5448 * @param pIemCpu The IEM per CPU data.
5449 * @param pResult The FPU operation result to store and push.
5450 */
5451IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5452{
5453 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5454 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5455 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5456
5457 /* Update FSW and bail if there are pending exceptions afterwards. */
5458 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5459 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5460 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5461 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5462 {
5463 pFpuCtx->FSW = fFsw;
5464 return;
5465 }
5466
5467 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5468 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5469 {
5470 /* All is fine, push the actual value. */
5471 pFpuCtx->FTW |= RT_BIT(iNewTop);
5472 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5473 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5474 }
5475 else if (pFpuCtx->FCW & X86_FCW_IM)
5476 {
5477 /* Masked stack overflow, push QNaN. */
5478 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5479 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5480 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5481 }
5482 else
5483 {
5484 /* Raise stack overflow, don't push anything. */
5485 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5486 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5487 return;
5488 }
5489
5490 fFsw &= ~X86_FSW_TOP_MASK;
5491 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5492 pFpuCtx->FSW = fFsw;
5493
5494 iemFpuRotateStackPush(pFpuCtx);
5495}
5496
5497
5498/**
5499 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5500 * FOP.
5501 *
5502 * @param pIemCpu The IEM per CPU data.
5503 * @param pResult The result to store.
5504 * @param iStReg Which FPU register to store it in.
5505 */
5506IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5507{
5508 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5509 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5510 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5511 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5512}
5513
5514
5515/**
5516 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5517 * FOP, and then pops the stack.
5518 *
5519 * @param pIemCpu The IEM per CPU data.
5520 * @param pResult The result to store.
5521 * @param iStReg Which FPU register to store it in.
5522 */
5523IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5524{
5525 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5526 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5527 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5528 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5529 iemFpuMaybePopOne(pFpuCtx);
5530}
5531
5532
5533/**
5534 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5535 * FPUDP, and FPUDS.
5536 *
5537 * @param pIemCpu The IEM per CPU data.
5538 * @param pResult The result to store.
5539 * @param iStReg Which FPU register to store it in.
5540 * @param iEffSeg The effective memory operand selector register.
5541 * @param GCPtrEff The effective memory operand offset.
5542 */
5543IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5544 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5545{
5546 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5547 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5548 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5549 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5550 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5551}
5552
5553
5554/**
5555 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5556 * FPUDP, and FPUDS, and then pops the stack.
5557 *
5558 * @param pIemCpu The IEM per CPU data.
5559 * @param pResult The result to store.
5560 * @param iStReg Which FPU register to store it in.
5561 * @param iEffSeg The effective memory operand selector register.
5562 * @param GCPtrEff The effective memory operand offset.
5563 */
5564IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5565 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5566{
5567 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5568 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5569 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5570 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5571 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5572 iemFpuMaybePopOne(pFpuCtx);
5573}
5574
5575
5576/**
5577 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5578 *
5579 * @param pIemCpu The IEM per CPU data.
5580 */
5581IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5582{
5583 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5584 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5585 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5586}
5587
5588
5589/**
5590 * Marks the specified stack register as free (for FFREE).
5591 *
5592 * @param pIemCpu The IEM per CPU data.
5593 * @param iStReg The register to free.
5594 */
5595IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5596{
5597 Assert(iStReg < 8);
5598 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5599 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5600 pFpuCtx->FTW &= ~RT_BIT(iReg);
5601}
5602
5603
5604/**
5605 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5606 *
5607 * @param pIemCpu The IEM per CPU data.
5608 */
5609IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5610{
5611 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5612 uint16_t uFsw = pFpuCtx->FSW;
5613 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5614 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5615 uFsw &= ~X86_FSW_TOP_MASK;
5616 uFsw |= uTop;
5617 pFpuCtx->FSW = uFsw;
5618}
5619
5620
5621/**
5622 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5623 *
5624 * @param pIemCpu The IEM per CPU data.
5625 */
5626IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5627{
5628 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5629 uint16_t uFsw = pFpuCtx->FSW;
5630 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5631 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5632 uFsw &= ~X86_FSW_TOP_MASK;
5633 uFsw |= uTop;
5634 pFpuCtx->FSW = uFsw;
5635}
5636
5637
5638/**
5639 * Updates the FSW, FOP, FPUIP, and FPUCS.
5640 *
5641 * @param pIemCpu The IEM per CPU data.
5642 * @param u16FSW The FSW from the current instruction.
5643 */
5644IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5645{
5646 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5647 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5648 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5649 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5650}
5651
5652
5653/**
5654 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5655 *
5656 * @param pIemCpu The IEM per CPU data.
5657 * @param u16FSW The FSW from the current instruction.
5658 */
5659IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5660{
5661 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5662 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5663 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5664 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5665 iemFpuMaybePopOne(pFpuCtx);
5666}
5667
5668
5669/**
5670 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5671 *
5672 * @param pIemCpu The IEM per CPU data.
5673 * @param u16FSW The FSW from the current instruction.
5674 * @param iEffSeg The effective memory operand selector register.
5675 * @param GCPtrEff The effective memory operand offset.
5676 */
5677IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5678{
5679 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5680 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5681 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5682 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5683 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5684}
5685
5686
5687/**
5688 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5689 *
5690 * @param pIemCpu The IEM per CPU data.
5691 * @param u16FSW The FSW from the current instruction.
5692 */
5693IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5694{
5695 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5696 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5697 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5698 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5699 iemFpuMaybePopOne(pFpuCtx);
5700 iemFpuMaybePopOne(pFpuCtx);
5701}
5702
5703
5704/**
5705 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5706 *
5707 * @param pIemCpu The IEM per CPU data.
5708 * @param u16FSW The FSW from the current instruction.
5709 * @param iEffSeg The effective memory operand selector register.
5710 * @param GCPtrEff The effective memory operand offset.
5711 */
5712IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5713{
5714 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5715 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5716 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5717 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5718 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5719 iemFpuMaybePopOne(pFpuCtx);
5720}
5721
5722
5723/**
5724 * Worker routine for raising an FPU stack underflow exception.
5725 *
5726 * @param pIemCpu The IEM per CPU data.
5727 * @param pFpuCtx The FPU context.
5728 * @param iStReg The stack register being accessed.
5729 */
5730IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5731{
5732 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5733 if (pFpuCtx->FCW & X86_FCW_IM)
5734 {
5735 /* Masked underflow. */
5736 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5737 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5738 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5739 if (iStReg != UINT8_MAX)
5740 {
5741 pFpuCtx->FTW |= RT_BIT(iReg);
5742 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5743 }
5744 }
5745 else
5746 {
5747 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5748 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5749 }
5750}
5751
5752
5753/**
5754 * Raises a FPU stack underflow exception.
5755 *
5756 * @param pIemCpu The IEM per CPU data.
5757 * @param iStReg The destination register that should be loaded
5758 * with QNaN if \#IS is not masked. Specify
5759 * UINT8_MAX if none (like for fcom).
5760 */
5761DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5762{
5763 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5764 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5765 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5766 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5767}
5768
5769
5770DECL_NO_INLINE(IEM_STATIC, void)
5771iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5772{
5773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5774 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5775 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5776 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5777 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5778}
5779
5780
5781DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5782{
5783 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5784 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5785 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5786 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5787 iemFpuMaybePopOne(pFpuCtx);
5788}
5789
5790
5791DECL_NO_INLINE(IEM_STATIC, void)
5792iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5793{
5794 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5795 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5796 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5797 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5798 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5799 iemFpuMaybePopOne(pFpuCtx);
5800}
5801
5802
5803DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5804{
5805 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5806 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5807 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5808 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5809 iemFpuMaybePopOne(pFpuCtx);
5810 iemFpuMaybePopOne(pFpuCtx);
5811}
5812
5813
5814DECL_NO_INLINE(IEM_STATIC, void)
5815iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5816{
5817 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5818 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5819 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5820
5821 if (pFpuCtx->FCW & X86_FCW_IM)
5822 {
5823 /* Masked underflow - Push QNaN. */
5824 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5825 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5826 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5827 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5828 pFpuCtx->FTW |= RT_BIT(iNewTop);
5829 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5830 iemFpuRotateStackPush(pFpuCtx);
5831 }
5832 else
5833 {
5834 /* Exception pending - don't change TOP or the register stack. */
5835 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5836 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5837 }
5838}
5839
5840
5841DECL_NO_INLINE(IEM_STATIC, void)
5842iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5843{
5844 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5845 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5846 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5847
5848 if (pFpuCtx->FCW & X86_FCW_IM)
5849 {
5850 /* Masked underflow - Push QNaN. */
5851 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5852 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5853 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5854 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5855 pFpuCtx->FTW |= RT_BIT(iNewTop);
5856 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5857 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5858 iemFpuRotateStackPush(pFpuCtx);
5859 }
5860 else
5861 {
5862 /* Exception pending - don't change TOP or the register stack. */
5863 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5864 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5865 }
5866}
5867
5868
5869/**
5870 * Worker routine for raising an FPU stack overflow exception on a push.
5871 *
5872 * @param pFpuCtx The FPU context.
5873 */
5874IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5875{
5876 if (pFpuCtx->FCW & X86_FCW_IM)
5877 {
5878 /* Masked overflow. */
5879 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5880 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5881 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5882 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5883 pFpuCtx->FTW |= RT_BIT(iNewTop);
5884 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5885 iemFpuRotateStackPush(pFpuCtx);
5886 }
5887 else
5888 {
5889 /* Exception pending - don't change TOP or the register stack. */
5890 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5891 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5892 }
5893}
5894
5895
5896/**
5897 * Raises a FPU stack overflow exception on a push.
5898 *
5899 * @param pIemCpu The IEM per CPU data.
5900 */
5901DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5902{
5903 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5904 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5905 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5906 iemFpuStackPushOverflowOnly(pFpuCtx);
5907}
5908
5909
5910/**
5911 * Raises a FPU stack overflow exception on a push with a memory operand.
5912 *
5913 * @param pIemCpu The IEM per CPU data.
5914 * @param iEffSeg The effective memory operand selector register.
5915 * @param GCPtrEff The effective memory operand offset.
5916 */
5917DECL_NO_INLINE(IEM_STATIC, void)
5918iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5919{
5920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5921 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5922 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5923 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5924 iemFpuStackPushOverflowOnly(pFpuCtx);
5925}
5926
5927
5928IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5929{
5930 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5931 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5932 if (pFpuCtx->FTW & RT_BIT(iReg))
5933 return VINF_SUCCESS;
5934 return VERR_NOT_FOUND;
5935}
5936
5937
5938IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5939{
5940 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5941 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5942 if (pFpuCtx->FTW & RT_BIT(iReg))
5943 {
5944 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5945 return VINF_SUCCESS;
5946 }
5947 return VERR_NOT_FOUND;
5948}
5949
5950
5951IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5952 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5953{
5954 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5955 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5956 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5957 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5958 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5959 {
5960 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5961 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
5962 return VINF_SUCCESS;
5963 }
5964 return VERR_NOT_FOUND;
5965}
5966
5967
5968IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5969{
5970 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5971 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5972 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5973 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5974 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5975 {
5976 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5977 return VINF_SUCCESS;
5978 }
5979 return VERR_NOT_FOUND;
5980}
5981
5982
5983/**
5984 * Updates the FPU exception status after FCW is changed.
5985 *
5986 * @param pFpuCtx The FPU context.
5987 */
5988IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
5989{
5990 uint16_t u16Fsw = pFpuCtx->FSW;
5991 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
5992 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5993 else
5994 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5995 pFpuCtx->FSW = u16Fsw;
5996}
5997
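/*
 * A standalone sketch of the summary-status rule applied above, using the
 * architectural bit positions: ES (and the legacy B bit) is set exactly when
 * an exception flag is set in FSW whose mask bit in FCW is clear.
 */
#if 0
# include <stdint.h>

# define EXAMPLE_FSW_XCPT_MASK UINT16_C(0x003f)     /* IE DE ZE OE UE PE */
# define EXAMPLE_FCW_XCPT_MASK UINT16_C(0x003f)     /* IM DM ZM OM UM PM */
# define EXAMPLE_FSW_ES        UINT16_C(0x0080)
# define EXAMPLE_FSW_B         UINT16_C(0x8000)

static uint16_t exampleRecalcFswEs(uint16_t fFsw, uint16_t fFcw)
{
    if ((fFsw & EXAMPLE_FSW_XCPT_MASK) & ~(fFcw & EXAMPLE_FCW_XCPT_MASK))
        return (uint16_t)(fFsw | EXAMPLE_FSW_ES | EXAMPLE_FSW_B);
    return (uint16_t)(fFsw & ~(EXAMPLE_FSW_ES | EXAMPLE_FSW_B));
}
#endif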
5998
5999/**
6000 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6001 *
6002 * @returns The full FTW.
6003 * @param pFpuCtx The FPU context.
6004 */
6005IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6006{
6007 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6008 uint16_t u16Ftw = 0;
6009 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6010 for (unsigned iSt = 0; iSt < 8; iSt++)
6011 {
6012 unsigned const iReg = (iSt + iTop) & 7;
6013 if (!(u8Ftw & RT_BIT(iReg)))
6014 u16Ftw |= 3 << (iReg * 2); /* empty */
6015 else
6016 {
6017 uint16_t uTag;
6018 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6019 if (pr80Reg->s.uExponent == 0x7fff)
6020 uTag = 2; /* Exponent is all 1's => Special. */
6021 else if (pr80Reg->s.uExponent == 0x0000)
6022 {
6023 if (pr80Reg->s.u64Mantissa == 0x0000)
6024 uTag = 1; /* All bits are zero => Zero. */
6025 else
6026 uTag = 2; /* Must be special. */
6027 }
6028 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6029 uTag = 0; /* Valid. */
6030 else
6031 uTag = 2; /* Must be special. */
6032
6033 u16Ftw |= uTag << (iReg * 2);
6034 }
6035 }
6036
6037 return u16Ftw;
6038}
6039
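/*
 * A standalone sketch of the per-register tag classification performed above
 * for the full tag word: 00 = valid, 01 = zero, 10 = special (NaNs,
 * infinities, denormals, unnormals), 11 = empty.
 */
#if 0
# include <stdint.h>

static unsigned exampleClassifyTag(int fEmpty, uint16_t uExponent, uint64_t u64Mantissa)
{
    if (fEmpty)
        return 3;                               /* empty */
    if (uExponent == 0x7fff)
        return 2;                               /* NaN / infinity => special */
    if (uExponent == 0)
        return u64Mantissa == 0 ? 1 : 2;        /* zero, or (pseudo-)denormal => special */
    return (u64Mantissa >> 63) ? 0 : 2;         /* J bit set => valid, else unnormal => special */
}
#endif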
6040
6041/**
6042 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6043 *
6044 * @returns The compressed FTW.
6045 * @param u16FullFtw The full FTW to convert.
6046 */
6047IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6048{
6049 uint8_t u8Ftw = 0;
6050 for (unsigned i = 0; i < 8; i++)
6051 {
6052 if ((u16FullFtw & 3) != 3 /*empty*/)
6053 u8Ftw |= RT_BIT(i);
6054 u16FullFtw >>= 2;
6055 }
6056
6057 return u8Ftw;
6058}
6059
6060/** @} */
6061
6062
6063/** @name Memory access.
6064 *
6065 * @{
6066 */
6067
6068
6069/**
6070 * Updates the IEMCPU::cbWritten counter if applicable.
6071 *
6072 * @param pIemCpu The IEM per CPU data.
6073 * @param fAccess The access being accounted for.
6074 * @param cbMem The access size.
6075 */
6076DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6077{
6078 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6079 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6080 pIemCpu->cbWritten += (uint32_t)cbMem;
6081}
6082
6083
6084/**
6085 * Checks if the given segment can be written to, raising the appropriate
6086 * exception if not.
6087 *
6088 * @returns VBox strict status code.
6089 *
6090 * @param pIemCpu The IEM per CPU data.
6091 * @param pHid Pointer to the hidden register.
6092 * @param iSegReg The register number.
6093 * @param pu64BaseAddr Where to return the base address to use for the
6094 * segment. (In 64-bit code it may differ from the
6095 * base in the hidden segment.)
6096 */
6097IEM_STATIC VBOXSTRICTRC
6098iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6099{
6100 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6101 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6102 else
6103 {
6104 if (!pHid->Attr.n.u1Present)
6105 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6106
6107 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6108 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6109 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6110 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6111 *pu64BaseAddr = pHid->u64Base;
6112 }
6113 return VINF_SUCCESS;
6114}
6115
6116
6117/**
6118 * Checks if the given segment can be read from, raising the appropriate
6119 * exception if not.
6120 *
6121 * @returns VBox strict status code.
6122 *
6123 * @param pIemCpu The IEM per CPU data.
6124 * @param pHid Pointer to the hidden register.
6125 * @param iSegReg The register number.
6126 * @param pu64BaseAddr Where to return the base address to use for the
6127 * segment. (In 64-bit code it may differ from the
6128 * base in the hidden segment.)
6129 */
6130IEM_STATIC VBOXSTRICTRC
6131iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6132{
6133 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6134 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6135 else
6136 {
6137 if (!pHid->Attr.n.u1Present)
6138 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6139
6140 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6141 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6142 *pu64BaseAddr = pHid->u64Base;
6143 }
6144 return VINF_SUCCESS;
6145}
6146
6147
6148/**
6149 * Applies the segment limit, base and attributes.
6150 *
6151 * This may raise a \#GP or \#SS.
6152 *
6153 * @returns VBox strict status code.
6154 *
6155 * @param pIemCpu The IEM per CPU data.
6156 * @param fAccess The kind of access which is being performed.
6157 * @param iSegReg The index of the segment register to apply.
6158 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6159 * TSS, ++).
6160 * @param cbMem The access size.
6161 * @param pGCPtrMem Pointer to the guest memory address to apply
6162 * segmentation to. Input and output parameter.
6163 */
6164IEM_STATIC VBOXSTRICTRC
6165iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6166{
6167 if (iSegReg == UINT8_MAX)
6168 return VINF_SUCCESS;
6169
6170 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6171 switch (pIemCpu->enmCpuMode)
6172 {
6173 case IEMMODE_16BIT:
6174 case IEMMODE_32BIT:
6175 {
6176 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6177 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6178
6179 Assert(pSel->Attr.n.u1Present);
6180 Assert(pSel->Attr.n.u1DescType);
6181 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6182 {
6183 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6184 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6185 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6186
6187 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6188 {
6189 /** @todo CPL check. */
6190 }
6191
6192 /*
6193 * There are two kinds of data selectors, normal and expand down.
6194 */
6195 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6196 {
6197 if ( GCPtrFirst32 > pSel->u32Limit
6198 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6199 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6200 }
6201 else
6202 {
6203 /*
6204 * The upper boundary is defined by the B bit, not the G bit!
6205 */
6206 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6207 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6208 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6209 }
6210 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6211 }
6212 else
6213 {
6214
6215 /*
6216 * A code selector can usually be used to read through; writing is
6217 * only permitted in real and V8086 mode.
6218 */
6219 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6220 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6221 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6222 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6223 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6224
6225 if ( GCPtrFirst32 > pSel->u32Limit
6226 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6227 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6228
6229 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6230 {
6231 /** @todo CPL check. */
6232 }
6233
6234 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6235 }
6236 return VINF_SUCCESS;
6237 }
6238
6239 case IEMMODE_64BIT:
6240 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6241 *pGCPtrMem += pSel->u64Base;
6242 return VINF_SUCCESS;
6243
6244 default:
6245 AssertFailedReturn(VERR_IEM_IPE_7);
6246 }
6247}
6248
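/*
 * A standalone sketch (flat 32-bit addresses, booleans passed as ints) of the
 * two data-segment limit checks above: a normal segment is valid from 0 up to
 * the limit, an expand-down segment from limit+1 up to 0xffff or 0xffffffff
 * depending on the D/B bit.
 */
#if 0
# include <stdint.h>

static int exampleDataSegAccessOk(uint32_t GCPtrFirst, uint32_t GCPtrLast, uint32_t u32Limit,
                                  int fExpandDown, int fDefBig)
{
    if (!fExpandDown)
        return GCPtrFirst <= u32Limit && GCPtrLast <= u32Limit;
    uint32_t const uUpperBound = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return GCPtrFirst >= u32Limit + UINT32_C(1) && GCPtrLast <= uUpperBound;
}
#endif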
6249
6250/**
6251 * Translates a virtual address to a physical address and checks if we
6252 * can access the page as specified.
6253 *
6254 * @param pIemCpu The IEM per CPU data.
6255 * @param GCPtrMem The virtual address.
6256 * @param fAccess The intended access.
6257 * @param pGCPhysMem Where to return the physical address.
6258 */
6259IEM_STATIC VBOXSTRICTRC
6260iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6261{
6262 /** @todo Need a different PGM interface here. We're currently using
6263 * generic / REM interfaces. This won't cut it for R0 & RC. */
6264 RTGCPHYS GCPhys;
6265 uint64_t fFlags;
6266 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6267 if (RT_FAILURE(rc))
6268 {
6269 /** @todo Check unassigned memory in unpaged mode. */
6270 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6271 *pGCPhysMem = NIL_RTGCPHYS;
6272 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6273 }
6274
6275 /* If the page is writable and does not have the no-exec bit set, all
6276 access is allowed. Otherwise we'll have to check more carefully... */
6277 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6278 {
6279 /* Write to read only memory? */
6280 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6281 && !(fFlags & X86_PTE_RW)
6282 && ( pIemCpu->uCpl != 0
6283 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6284 {
6285 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6286 *pGCPhysMem = NIL_RTGCPHYS;
6287 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6288 }
6289
6290 /* Kernel memory accessed by userland? */
6291 if ( !(fFlags & X86_PTE_US)
6292 && pIemCpu->uCpl == 3
6293 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6294 {
6295 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6296 *pGCPhysMem = NIL_RTGCPHYS;
6297 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6298 }
6299
6300 /* Executing non-executable memory? */
6301 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6302 && (fFlags & X86_PTE_PAE_NX)
6303 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6304 {
6305 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6306 *pGCPhysMem = NIL_RTGCPHYS;
6307 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6308 VERR_ACCESS_DENIED);
6309 }
6310 }
6311
6312 /*
6313 * Set the dirty / access flags.
6314 * ASSUMES this is set when the address is translated rather than on commit...
6315 */
6316 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6317 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6318 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6319 {
6320 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6321 AssertRC(rc2);
6322 }
6323
6324 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6325 *pGCPhysMem = GCPhys;
6326 return VINF_SUCCESS;
6327}
6328
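/*
 * A standalone, simplified sketch of the permission checks above (it ignores
 * the implicit supervisor access exemption); the PTE bit positions below are
 * the architectural ones for RW, US and NX.
 */
#if 0
# include <stdint.h>

static int examplePageAccessDenied(uint64_t fPte, int fWrite, int fExec, unsigned uCpl, int fCr0Wp, int fNxe)
{
    int const fPteRw = (fPte & UINT64_C(0x02)) != 0;            /* bit 1:  read/write */
    int const fPteUs = (fPte & UINT64_C(0x04)) != 0;            /* bit 2:  user/supervisor */
    int const fPteNx = (fPte & (UINT64_C(1) << 63)) != 0;       /* bit 63: no-execute */

    if (fWrite && !fPteRw && (uCpl != 0 || fCr0Wp))
        return 1;                       /* write to a read-only page */
    if (!fPteUs && uCpl == 3)
        return 1;                       /* user-mode access to a kernel page */
    if (fExec && fPteNx && fNxe)
        return 1;                       /* instruction fetch from an NX page */
    return 0;                           /* access allowed */
}
#endif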
6329
6330
6331/**
6332 * Maps a physical page.
6333 *
6334 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6335 * @param pIemCpu The IEM per CPU data.
6336 * @param GCPhysMem The physical address.
6337 * @param fAccess The intended access.
6338 * @param ppvMem Where to return the mapping address.
6339 * @param pLock The PGM lock.
6340 */
6341IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6342{
6343#ifdef IEM_VERIFICATION_MODE_FULL
6344 /* Force the alternative path so we can ignore writes. */
6345 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6346 {
6347 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6348 {
6349 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6350 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6351 if (RT_FAILURE(rc2))
6352 pIemCpu->fProblematicMemory = true;
6353 }
6354 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6355 }
6356#endif
6357#ifdef IEM_LOG_MEMORY_WRITES
6358 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6359 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6360#endif
6361#ifdef IEM_VERIFICATION_MODE_MINIMAL
6362 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6363#endif
6364
6365 /** @todo This API may require some improving later. A private deal with PGM
6366 * regarding locking and unlocking needs to be struck. A couple of TLBs
6367 * living in PGM, but with publicly accessible inlined access methods
6368 * could perhaps be an even better solution. */
6369 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6370 GCPhysMem,
6371 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6372 pIemCpu->fBypassHandlers,
6373 ppvMem,
6374 pLock);
6375 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6376 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6377
6378#ifdef IEM_VERIFICATION_MODE_FULL
6379 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6380 pIemCpu->fProblematicMemory = true;
6381#endif
6382 return rc;
6383}
6384
6385
6386/**
6387 * Unmap a page previously mapped by iemMemPageMap.
6388 *
6389 * @param pIemCpu The IEM per CPU data.
6390 * @param GCPhysMem The physical address.
6391 * @param fAccess The intended access.
6392 * @param pvMem What iemMemPageMap returned.
6393 * @param pLock The PGM lock.
6394 */
6395DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6396{
6397 NOREF(pIemCpu);
6398 NOREF(GCPhysMem);
6399 NOREF(fAccess);
6400 NOREF(pvMem);
6401 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6402}
6403
6404
6405/**
6406 * Looks up a memory mapping entry.
6407 *
6408 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6409 * @param pIemCpu The IEM per CPU data.
6410 * @param pvMem The memory address.
6411 * @param fAccess The access type of the mapping to look up.
6412 */
6413DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6414{
6415 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6416 if ( pIemCpu->aMemMappings[0].pv == pvMem
6417 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6418 return 0;
6419 if ( pIemCpu->aMemMappings[1].pv == pvMem
6420 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6421 return 1;
6422 if ( pIemCpu->aMemMappings[2].pv == pvMem
6423 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6424 return 2;
6425 return VERR_NOT_FOUND;
6426}
6427
6428
6429/**
6430 * Finds a free memmap entry when using iNextMapping doesn't work.
6431 *
6432 * @returns Memory mapping index, 1024 on failure.
6433 * @param pIemCpu The IEM per CPU data.
6434 */
6435IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6436{
6437 /*
6438 * The easy case.
6439 */
6440 if (pIemCpu->cActiveMappings == 0)
6441 {
6442 pIemCpu->iNextMapping = 1;
6443 return 0;
6444 }
6445
6446 /* There should be enough mappings for all instructions. */
6447 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6448
6449 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6450 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6451 return i;
6452
6453 AssertFailedReturn(1024);
6454}
6455
6456
6457/**
6458 * Commits a bounce buffer that needs writing back and unmaps it.
6459 *
6460 * @returns Strict VBox status code.
6461 * @param pIemCpu The IEM per CPU data.
6462 * @param iMemMap The index of the buffer to commit.
6463 */
6464IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6465{
6466 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6467 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6468
6469 /*
6470 * Do the writing.
6471 */
6472#ifndef IEM_VERIFICATION_MODE_MINIMAL
6473 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6474 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6475 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6476 {
6477 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6478 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6479 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6480 if (!pIemCpu->fBypassHandlers)
6481 {
6482 /*
6483 * Carefully and efficiently dealing with access handler return
6484 * codes makes this a little bloated.
6485 */
6486 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6487 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6488 pbBuf,
6489 cbFirst,
6490 PGMACCESSORIGIN_IEM);
6491 if (rcStrict == VINF_SUCCESS)
6492 {
6493 if (cbSecond)
6494 {
6495 rcStrict = PGMPhysWrite(pVM,
6496 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6497 pbBuf + cbFirst,
6498 cbSecond,
6499 PGMACCESSORIGIN_IEM);
6500 if (rcStrict == VINF_SUCCESS)
6501 { /* nothing */ }
6502 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6503 {
6504 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6505 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6506 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6507 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6508 }
6509 else
6510 {
6511 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6512 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6513 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6514 return rcStrict;
6515 }
6516 }
6517 }
6518 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6519 {
6520 if (!cbSecond)
6521 {
6522 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6523 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6524 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6525 }
6526 else
6527 {
6528 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6529 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6530 pbBuf + cbFirst,
6531 cbSecond,
6532 PGMACCESSORIGIN_IEM);
6533 if (rcStrict2 == VINF_SUCCESS)
6534 {
6535 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6536 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6537 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6538 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6539 }
6540 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6541 {
6542 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6543 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6544 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6545 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6546 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6547 }
6548 else
6549 {
6550 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6551 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6552 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6553 return rcStrict2;
6554 }
6555 }
6556 }
6557 else
6558 {
6559 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6560 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6561 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6562 return rcStrict;
6563 }
6564 }
6565 else
6566 {
6567 /*
6568 * No access handlers, much simpler.
6569 */
6570 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6571 if (RT_SUCCESS(rc))
6572 {
6573 if (cbSecond)
6574 {
6575 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6576 if (RT_SUCCESS(rc))
6577 { /* likely */ }
6578 else
6579 {
6580 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6581 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6582 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6583 return rc;
6584 }
6585 }
6586 }
6587 else
6588 {
6589 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6590 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6591 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6592 return rc;
6593 }
6594 }
6595 }
6596#endif
6597
6598#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6599 /*
6600 * Record the write(s).
6601 */
6602 if (!pIemCpu->fNoRem)
6603 {
6604 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6605 if (pEvtRec)
6606 {
6607 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6608 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6609 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6610 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6611 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6612 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6613 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6614 }
6615 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6616 {
6617 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6618 if (pEvtRec)
6619 {
6620 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6621 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6622 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6623 memcpy(pEvtRec->u.RamWrite.ab,
6624 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6625 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6626 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6627 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6628 }
6629 }
6630 }
6631#endif
6632#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6633 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6634 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6635 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6636 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6637 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6638 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6639
6640 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6641 g_cbIemWrote = cbWrote;
6642 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6643#endif
6644
6645 /*
6646 * Free the mapping entry.
6647 */
6648 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6649 Assert(pIemCpu->cActiveMappings != 0);
6650 pIemCpu->cActiveMappings--;
6651 return VINF_SUCCESS;
6652}
6653
6654
6655/**
6656 * iemMemMap worker that deals with a request crossing pages.
6657 */
6658IEM_STATIC VBOXSTRICTRC
6659iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6660{
6661 /*
6662 * Do the address translations.
6663 */
6664 RTGCPHYS GCPhysFirst;
6665 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6666 if (rcStrict != VINF_SUCCESS)
6667 return rcStrict;
6668
6669/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6670 * last byte. */
6671 RTGCPHYS GCPhysSecond;
6672 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6673 if (rcStrict != VINF_SUCCESS)
6674 return rcStrict;
6675 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6676
6677 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6678#ifdef IEM_VERIFICATION_MODE_FULL
6679 /*
6680 * Detect problematic memory when verifying so we can select
6681 * the right execution engine. (TLB: Redo this.)
6682 */
6683 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6684 {
6685 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6686 if (RT_SUCCESS(rc2))
6687 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6688 if (RT_FAILURE(rc2))
6689 pIemCpu->fProblematicMemory = true;
6690 }
6691#endif
6692
6693
6694 /*
6695 * Read in the current memory content if it's a read, execute or partial
6696 * write access.
6697 */
6698 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6699 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6700 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6701
6702 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6703 {
6704 if (!pIemCpu->fBypassHandlers)
6705 {
6706 /*
6707 * Must carefully deal with access handler status codes here,
6708 * makes the code a bit bloated.
6709 */
6710 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6711 if (rcStrict == VINF_SUCCESS)
6712 {
6713 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6714 if (rcStrict == VINF_SUCCESS)
6715 { /*likely */ }
6716 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6717 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6718 else
6719 {
6720 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6721 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6722 return rcStrict;
6723 }
6724 }
6725 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6726 {
6727 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6728 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6729 {
6730 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6731 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6732 }
6733 else
6734 {
6735 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6736 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6737 return rcStrict2;
6738 }
6739 }
6740 else
6741 {
6742 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6743 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6744 return rcStrict;
6745 }
6746 }
6747 else
6748 {
6749 /*
6750 * No informational status codes here, much more straightforward.
6751 */
6752 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6753 if (RT_SUCCESS(rc))
6754 {
6755 Assert(rc == VINF_SUCCESS);
6756 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6757 if (RT_SUCCESS(rc))
6758 Assert(rc == VINF_SUCCESS);
6759 else
6760 {
6761 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6762 return rc;
6763 }
6764 }
6765 else
6766 {
6767 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6768 return rc;
6769 }
6770 }
6771
6772#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6773 if ( !pIemCpu->fNoRem
6774 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6775 {
6776 /*
6777 * Record the reads.
6778 */
6779 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6780 if (pEvtRec)
6781 {
6782 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6783 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6784 pEvtRec->u.RamRead.cb = cbFirstPage;
6785 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6786 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6787 }
6788 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6789 if (pEvtRec)
6790 {
6791 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6792 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6793 pEvtRec->u.RamRead.cb = cbSecondPage;
6794 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6795 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6796 }
6797 }
6798#endif
6799 }
6800#ifdef VBOX_STRICT
6801 else
6802 memset(pbBuf, 0xcc, cbMem);
6803 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6804 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6805#endif
6806
6807 /*
6808 * Commit the bounce buffer entry.
6809 */
6810 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6811 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6812 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6813 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6814 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6815 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6816 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6817 pIemCpu->iNextMapping = iMemMap + 1;
6818 pIemCpu->cActiveMappings++;
6819
6820 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6821 *ppvMem = pbBuf;
6822 return VINF_SUCCESS;
6823}
6824
6825
6826/**
6827 * iemMemMap worker that deals with iemMemPageMap failures.
6828 */
6829IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6830 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6831{
6832 /*
6833 * Filter out conditions we can handle and the ones which shouldn't happen.
6834 */
6835 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6836 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6837 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6838 {
6839 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6840 return rcMap;
6841 }
6842 pIemCpu->cPotentialExits++;
6843
6844 /*
6845 * Read in the current memory content if it's a read, execute or partial
6846 * write access.
6847 */
6848 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6849 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6850 {
6851 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6852 memset(pbBuf, 0xff, cbMem);
6853 else
6854 {
6855 int rc;
6856 if (!pIemCpu->fBypassHandlers)
6857 {
6858 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6859 if (rcStrict == VINF_SUCCESS)
6860 { /* nothing */ }
6861 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6862 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6863 else
6864 {
6865 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6866 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6867 return rcStrict;
6868 }
6869 }
6870 else
6871 {
6872 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6873 if (RT_SUCCESS(rc))
6874 { /* likely */ }
6875 else
6876 {
6877 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6878 GCPhysFirst, rc));
6879 return rc;
6880 }
6881 }
6882 }
6883
6884#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6885 if ( !pIemCpu->fNoRem
6886 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6887 {
6888 /*
6889 * Record the read.
6890 */
6891 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6892 if (pEvtRec)
6893 {
6894 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6895 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6896 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6897 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6898 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6899 }
6900 }
6901#endif
6902 }
6903#ifdef VBOX_STRICT
6904 else
6905 memset(pbBuf, 0xcc, cbMem);
6906#endif
6907#ifdef VBOX_STRICT
6908 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6909 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6910#endif
6911
6912 /*
6913 * Commit the bounce buffer entry.
6914 */
6915 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6916 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6917 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6918 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6919 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6920 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6921 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6922 pIemCpu->iNextMapping = iMemMap + 1;
6923 pIemCpu->cActiveMappings++;
6924
6925 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6926 *ppvMem = pbBuf;
6927 return VINF_SUCCESS;
6928}
6929
6930
6931
6932/**
6933 * Maps the specified guest memory for the given kind of access.
6934 *
6935 * This may be using bounce buffering of the memory if it's crossing a page
6936 * boundary or if there is an access handler installed for any of it. Because
6937 * of lock prefix guarantees, we're in for some extra clutter when this
6938 * happens.
6939 *
6940 * This may raise a \#GP, \#SS, \#PF or \#AC.
6941 *
6942 * @returns VBox strict status code.
6943 *
6944 * @param pIemCpu The IEM per CPU data.
6945 * @param ppvMem Where to return the pointer to the mapped
6946 * memory.
6947 * @param cbMem The number of bytes to map. This is usually 1,
6948 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6949 * string operations it can be up to a page.
6950 * @param iSegReg The index of the segment register to use for
6951 * this access. The base and limits are checked.
6952 * Use UINT8_MAX to indicate that no segmentation
6953 * is required (for IDT, GDT and LDT accesses).
6954 * @param GCPtrMem The address of the guest memory.
6955 * @param fAccess How the memory is being accessed. The
6956 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6957 * how to map the memory, while the
6958 * IEM_ACCESS_WHAT_XXX bit is used when raising
6959 * exceptions.
6960 */
6961IEM_STATIC VBOXSTRICTRC
6962iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6963{
6964 /*
6965 * Check the input and figure out which mapping entry to use.
6966 */
6967 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6968 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6969
6970 unsigned iMemMap = pIemCpu->iNextMapping;
6971 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6972 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6973 {
6974 iMemMap = iemMemMapFindFree(pIemCpu);
6975 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
6976 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
6977 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
6978 pIemCpu->aMemMappings[2].fAccess),
6979 VERR_IEM_IPE_9);
6980 }
6981
6982 /*
6983 * Map the memory, checking that we can actually access it. If something
6984 * slightly complicated happens, fall back on bounce buffering.
6985 */
6986 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6987 if (rcStrict != VINF_SUCCESS)
6988 return rcStrict;
6989
6990 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6991 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6992
6993 RTGCPHYS GCPhysFirst;
6994 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6995 if (rcStrict != VINF_SUCCESS)
6996 return rcStrict;
6997
6998 void *pvMem;
6999 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7000 if (rcStrict != VINF_SUCCESS)
7001 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7002
7003 /*
7004 * Fill in the mapping table entry.
7005 */
7006 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7007 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7008 pIemCpu->iNextMapping = iMemMap + 1;
7009 pIemCpu->cActiveMappings++;
7010
7011 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7012 *ppvMem = pvMem;
7013 return VINF_SUCCESS;
7014}
7015
7016
7017/**
7018 * Commits the guest memory if bounce buffered and unmaps it.
7019 *
7020 * @returns Strict VBox status code.
7021 * @param pIemCpu The IEM per CPU data.
7022 * @param pvMem The mapping.
7023 * @param fAccess The kind of access.
7024 */
7025IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7026{
7027 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7028 AssertReturn(iMemMap >= 0, iMemMap);
7029
7030 /* If it's bounce buffered, we may need to write back the buffer. */
7031 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7032 {
7033 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7034 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7035 }
7036 /* Otherwise unlock it. */
7037 else
7038 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7039
7040 /* Free the entry. */
7041 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7042 Assert(pIemCpu->cActiveMappings != 0);
7043 pIemCpu->cActiveMappings--;
7044 return VINF_SUCCESS;
7045}
7046
7047
7048/**
7049 * Rolls back mappings, releasing page locks and such.
7050 *
7051 * The caller shall only call this after checking cActiveMappings.
7052 *
7053 * @returns Strict VBox status code to pass up.
7054 * @param pIemCpu The IEM per CPU data.
7055 */
7056IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7057{
7058 Assert(pIemCpu->cActiveMappings > 0);
7059
7060 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7061 while (iMemMap-- > 0)
7062 {
7063 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7064 if (fAccess != IEM_ACCESS_INVALID)
7065 {
7066 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7067 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7068 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7069 Assert(pIemCpu->cActiveMappings > 0);
7070 pIemCpu->cActiveMappings--;
7071 }
7072 }
7073}
7074
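/*
 * Illustrative sketch only (hypothetical helper, excluded from the build):
 * the map / modify / commit pattern the data access helpers below follow,
 * and where iemMemRollback fits in.  Assumes the iemMemMap,
 * iemMemCommitAndUnmap and iemMemRollback signatures defined above.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemMemExampleStoreU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
{
    /* Map the guest memory for writing; this may bounce buffer or raise #GP/#SS/#PF/#AC. */
    uint16_t *pu16Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                /* Nothing was mapped for this access. */

    /* Modify via the returned pointer; it is either direct guest memory or a bounce buffer. */
    *pu16Dst = u16Value;

    /* Commit and unmap; for bounce buffered writes the actual PGMPhysWrite happens here. */
    rcStrict = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);

    /* An outer caller that aborts the instruction while pIemCpu->cActiveMappings is
       still non-zero releases the leftover mappings with iemMemRollback(pIemCpu). */
    return rcStrict;
}
#endif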
7075
7076/**
7077 * Fetches a data byte.
7078 *
7079 * @returns Strict VBox status code.
7080 * @param pIemCpu The IEM per CPU data.
7081 * @param pu8Dst Where to return the byte.
7082 * @param iSegReg The index of the segment register to use for
7083 * this access. The base and limits are checked.
7084 * @param GCPtrMem The address of the guest memory.
7085 */
7086IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7087{
7088 /* The lazy approach for now... */
7089 uint8_t const *pu8Src;
7090 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7091 if (rc == VINF_SUCCESS)
7092 {
7093 *pu8Dst = *pu8Src;
7094 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7095 }
7096 return rc;
7097}
7098
7099
7100/**
7101 * Fetches a data word.
7102 *
7103 * @returns Strict VBox status code.
7104 * @param pIemCpu The IEM per CPU data.
7105 * @param pu16Dst Where to return the word.
7106 * @param iSegReg The index of the segment register to use for
7107 * this access. The base and limits are checked.
7108 * @param GCPtrMem The address of the guest memory.
7109 */
7110IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7111{
7112 /* The lazy approach for now... */
7113 uint16_t const *pu16Src;
7114 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7115 if (rc == VINF_SUCCESS)
7116 {
7117 *pu16Dst = *pu16Src;
7118 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7119 }
7120 return rc;
7121}
7122
7123
7124/**
7125 * Fetches a data dword.
7126 *
7127 * @returns Strict VBox status code.
7128 * @param pIemCpu The IEM per CPU data.
7129 * @param pu32Dst Where to return the dword.
7130 * @param iSegReg The index of the segment register to use for
7131 * this access. The base and limits are checked.
7132 * @param GCPtrMem The address of the guest memory.
7133 */
7134IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7135{
7136 /* The lazy approach for now... */
7137 uint32_t const *pu32Src;
7138 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7139 if (rc == VINF_SUCCESS)
7140 {
7141 *pu32Dst = *pu32Src;
7142 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7143 }
7144 return rc;
7145}
7146
7147
7148#ifdef SOME_UNUSED_FUNCTION
7149/**
7150 * Fetches a data dword and sign extends it to a qword.
7151 *
7152 * @returns Strict VBox status code.
7153 * @param pIemCpu The IEM per CPU data.
7154 * @param pu64Dst Where to return the sign extended value.
7155 * @param iSegReg The index of the segment register to use for
7156 * this access. The base and limits are checked.
7157 * @param GCPtrMem The address of the guest memory.
7158 */
7159IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7160{
7161 /* The lazy approach for now... */
7162 int32_t const *pi32Src;
7163 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7164 if (rc == VINF_SUCCESS)
7165 {
7166 *pu64Dst = *pi32Src;
7167 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7168 }
7169#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7170 else
7171 *pu64Dst = 0;
7172#endif
7173 return rc;
7174}
7175#endif
7176
7177
7178/**
7179 * Fetches a data qword.
7180 *
7181 * @returns Strict VBox status code.
7182 * @param pIemCpu The IEM per CPU data.
7183 * @param pu64Dst Where to return the qword.
7184 * @param iSegReg The index of the segment register to use for
7185 * this access. The base and limits are checked.
7186 * @param GCPtrMem The address of the guest memory.
7187 */
7188IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7189{
7190 /* The lazy approach for now... */
7191 uint64_t const *pu64Src;
7192 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7193 if (rc == VINF_SUCCESS)
7194 {
7195 *pu64Dst = *pu64Src;
7196 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7197 }
7198 return rc;
7199}
7200
7201
7202/**
7203 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7204 *
7205 * @returns Strict VBox status code.
7206 * @param pIemCpu The IEM per CPU data.
7207 * @param pu64Dst Where to return the qword.
7208 * @param iSegReg The index of the segment register to use for
7209 * this access. The base and limits are checked.
7210 * @param GCPtrMem The address of the guest memory.
7211 */
7212IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7213{
7214 /* The lazy approach for now... */
7215 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7216 if (RT_UNLIKELY(GCPtrMem & 15))
7217 return iemRaiseGeneralProtectionFault0(pIemCpu);
7218
7219 uint64_t const *pu64Src;
7220 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7221 if (rc == VINF_SUCCESS)
7222 {
7223 *pu64Dst = *pu64Src;
7224 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7225 }
7226 return rc;
7227}
7228
7229
7230/**
7231 * Fetches a data tword.
7232 *
7233 * @returns Strict VBox status code.
7234 * @param pIemCpu The IEM per CPU data.
7235 * @param pr80Dst Where to return the tword.
7236 * @param iSegReg The index of the segment register to use for
7237 * this access. The base and limits are checked.
7238 * @param GCPtrMem The address of the guest memory.
7239 */
7240IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7241{
7242 /* The lazy approach for now... */
7243 PCRTFLOAT80U pr80Src;
7244 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7245 if (rc == VINF_SUCCESS)
7246 {
7247 *pr80Dst = *pr80Src;
7248 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7249 }
7250 return rc;
7251}
7252
7253
7254/**
7255 * Fetches a data dqword (double qword), generally SSE related.
7256 *
7257 * @returns Strict VBox status code.
7258 * @param pIemCpu The IEM per CPU data.
7259 * @param pu128Dst Where to return the dqword.
7260 * @param iSegReg The index of the segment register to use for
7261 * this access. The base and limits are checked.
7262 * @param GCPtrMem The address of the guest memory.
7263 */
7264IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7265{
7266 /* The lazy approach for now... */
7267 uint128_t const *pu128Src;
7268 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7269 if (rc == VINF_SUCCESS)
7270 {
7271 *pu128Dst = *pu128Src;
7272 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7273 }
7274 return rc;
7275}
7276
7277
7278/**
7279 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7280 * related.
7281 *
7282 * Raises \#GP(0) if not aligned.
7283 *
7284 * @returns Strict VBox status code.
7285 * @param pIemCpu The IEM per CPU data.
7286 * @param pu128Dst Where to return the dqword.
7287 * @param iSegReg The index of the segment register to use for
7288 * this access. The base and limits are checked.
7289 * @param GCPtrMem The address of the guest memory.
7290 */
7291IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7292{
7293 /* The lazy approach for now... */
7294 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7295 if ( (GCPtrMem & 15)
7296 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7297 return iemRaiseGeneralProtectionFault0(pIemCpu);
7298
7299 uint128_t const *pu128Src;
7300 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7301 if (rc == VINF_SUCCESS)
7302 {
7303 *pu128Dst = *pu128Src;
7304 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7305 }
7306 return rc;
7307}
7308
7309
7310
7311
7312/**
7313 * Fetches a descriptor register (lgdt, lidt).
7314 *
7315 * @returns Strict VBox status code.
7316 * @param pIemCpu The IEM per CPU data.
7317 * @param pcbLimit Where to return the limit.
7318 * @param pGCPtrBase Where to return the base.
7319 * @param iSegReg The index of the segment register to use for
7320 * this access. The base and limits are checked.
7321 * @param GCPtrMem The address of the guest memory.
7322 * @param enmOpSize The effective operand size.
7323 */
7324IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7325 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7326{
7327 uint8_t const *pu8Src;
7328 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7329 (void **)&pu8Src,
7330 enmOpSize == IEMMODE_64BIT
7331 ? 2 + 8
7332 : enmOpSize == IEMMODE_32BIT
7333 ? 2 + 4
7334 : 2 + 3,
7335 iSegReg,
7336 GCPtrMem,
7337 IEM_ACCESS_DATA_R);
7338 if (rcStrict == VINF_SUCCESS)
7339 {
7340 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7341 switch (enmOpSize)
7342 {
7343 case IEMMODE_16BIT:
7344 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7345 break;
7346 case IEMMODE_32BIT:
7347 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7348 break;
7349 case IEMMODE_64BIT:
7350 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7351 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7352 break;
7353
7354 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7355 }
7356 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7357 }
7358 return rcStrict;
7359}
7360
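/*
 * Illustrative only (hypothetical type, excluded from the build): the
 * pseudo-descriptor layout parsed by iemMemFetchDataXdtr above, shown as a
 * packed struct.  The real code uses byte accesses, presumably to avoid
 * alignment and strict-aliasing trouble.  Only the low 3 (16-bit operand
 * size), 4 (32-bit) or 8 (64-bit) bytes of the base are significant.
 */
#if 0
#pragma pack(1)
typedef struct IEMXDTRDESCEXAMPLE
{
    uint16_t cbLimit;       /* bytes 0..1: descriptor table limit */
    uint64_t GCPtrBase;     /* bytes 2..9: descriptor table base   */
} IEMXDTRDESCEXAMPLE;
#pragma pack()
#endif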
7361
7362
7363/**
7364 * Stores a data byte.
7365 *
7366 * @returns Strict VBox status code.
7367 * @param pIemCpu The IEM per CPU data.
7368 * @param iSegReg The index of the segment register to use for
7369 * this access. The base and limits are checked.
7370 * @param GCPtrMem The address of the guest memory.
7371 * @param u8Value The value to store.
7372 */
7373IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7374{
7375 /* The lazy approach for now... */
7376 uint8_t *pu8Dst;
7377 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7378 if (rc == VINF_SUCCESS)
7379 {
7380 *pu8Dst = u8Value;
7381 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7382 }
7383 return rc;
7384}
7385
7386
7387/**
7388 * Stores a data word.
7389 *
7390 * @returns Strict VBox status code.
7391 * @param pIemCpu The IEM per CPU data.
7392 * @param iSegReg The index of the segment register to use for
7393 * this access. The base and limits are checked.
7394 * @param GCPtrMem The address of the guest memory.
7395 * @param u16Value The value to store.
7396 */
7397IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7398{
7399 /* The lazy approach for now... */
7400 uint16_t *pu16Dst;
7401 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7402 if (rc == VINF_SUCCESS)
7403 {
7404 *pu16Dst = u16Value;
7405 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7406 }
7407 return rc;
7408}
7409
7410
7411/**
7412 * Stores a data dword.
7413 *
7414 * @returns Strict VBox status code.
7415 * @param pIemCpu The IEM per CPU data.
7416 * @param iSegReg The index of the segment register to use for
7417 * this access. The base and limits are checked.
7418 * @param GCPtrMem The address of the guest memory.
7419 * @param u32Value The value to store.
7420 */
7421IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7422{
7423 /* The lazy approach for now... */
7424 uint32_t *pu32Dst;
7425 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7426 if (rc == VINF_SUCCESS)
7427 {
7428 *pu32Dst = u32Value;
7429 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7430 }
7431 return rc;
7432}
7433
7434
7435/**
7436 * Stores a data qword.
7437 *
7438 * @returns Strict VBox status code.
7439 * @param pIemCpu The IEM per CPU data.
7440 * @param iSegReg The index of the segment register to use for
7441 * this access. The base and limits are checked.
7442 * @param GCPtrMem The address of the guest memory.
7443 * @param u64Value The value to store.
7444 */
7445IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7446{
7447 /* The lazy approach for now... */
7448 uint64_t *pu64Dst;
7449 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7450 if (rc == VINF_SUCCESS)
7451 {
7452 *pu64Dst = u64Value;
7453 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7454 }
7455 return rc;
7456}
7457
7458
7459/**
7460 * Stores a data dqword.
7461 *
7462 * @returns Strict VBox status code.
7463 * @param pIemCpu The IEM per CPU data.
7464 * @param iSegReg The index of the segment register to use for
7465 * this access. The base and limits are checked.
7466 * @param GCPtrMem The address of the guest memory.
7467 * @param u128Value The value to store.
7468 */
7469IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7470{
7471 /* The lazy approach for now... */
7472 uint128_t *pu128Dst;
7473 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7474 if (rc == VINF_SUCCESS)
7475 {
7476 *pu128Dst = u128Value;
7477 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7478 }
7479 return rc;
7480}
7481
7482
7483/**
7484 * Stores a data dqword, SSE aligned.
7485 *
7486 * @returns Strict VBox status code.
7487 * @param pIemCpu The IEM per CPU data.
7488 * @param iSegReg The index of the segment register to use for
7489 * this access. The base and limits are checked.
7490 * @param GCPtrMem The address of the guest memory.
7491 * @param u128Value The value to store.
7492 */
7493IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7494{
7495 /* The lazy approach for now... */
7496 if ( (GCPtrMem & 15)
7497 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7498 return iemRaiseGeneralProtectionFault0(pIemCpu);
7499
7500 uint128_t *pu128Dst;
7501 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7502 if (rc == VINF_SUCCESS)
7503 {
7504 *pu128Dst = u128Value;
7505 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7506 }
7507 return rc;
7508}
7509
7510
7511/**
7512 * Stores a descriptor register (sgdt, sidt).
7513 *
7514 * @returns Strict VBox status code.
7515 * @param pIemCpu The IEM per CPU data.
7516 * @param cbLimit The limit.
7517 * @param GCPtrBase The base address.
7518 * @param iSegReg The index of the segment register to use for
7519 * this access. The base and limits are checked.
7520 * @param GCPtrMem The address of the guest memory.
7521 * @param enmOpSize The effective operand size.
7522 */
7523IEM_STATIC VBOXSTRICTRC
7524iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7525{
7526 uint8_t *pu8Src;
7527 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7528 (void **)&pu8Src,
7529 enmOpSize == IEMMODE_64BIT
7530 ? 2 + 8
7531 : enmOpSize == IEMMODE_32BIT
7532 ? 2 + 4
7533 : 2 + 3,
7534 iSegReg,
7535 GCPtrMem,
7536 IEM_ACCESS_DATA_W);
7537 if (rcStrict == VINF_SUCCESS)
7538 {
7539 pu8Src[0] = RT_BYTE1(cbLimit);
7540 pu8Src[1] = RT_BYTE2(cbLimit);
7541 pu8Src[2] = RT_BYTE1(GCPtrBase);
7542 pu8Src[3] = RT_BYTE2(GCPtrBase);
7543 pu8Src[4] = RT_BYTE3(GCPtrBase);
7544 if (enmOpSize == IEMMODE_16BIT)
7545 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7546 else
7547 {
7548 pu8Src[5] = RT_BYTE4(GCPtrBase);
7549 if (enmOpSize == IEMMODE_64BIT)
7550 {
7551 pu8Src[6] = RT_BYTE5(GCPtrBase);
7552 pu8Src[7] = RT_BYTE6(GCPtrBase);
7553 pu8Src[8] = RT_BYTE7(GCPtrBase);
7554 pu8Src[9] = RT_BYTE8(GCPtrBase);
7555 }
7556 }
7557 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7558 }
7559 return rcStrict;
7560}
7561
7562
7563/**
7564 * Pushes a word onto the stack.
7565 *
7566 * @returns Strict VBox status code.
7567 * @param pIemCpu The IEM per CPU data.
7568 * @param u16Value The value to push.
7569 */
7570IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7571{
7572 /* Increment the stack pointer. */
7573 uint64_t uNewRsp;
7574 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7575 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7576
7577 /* Write the word the lazy way. */
7578 uint16_t *pu16Dst;
7579 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7580 if (rc == VINF_SUCCESS)
7581 {
7582 *pu16Dst = u16Value;
7583 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7584 }
7585
7586 /* Commit the new RSP value unless an access handler made trouble. */
7587 if (rc == VINF_SUCCESS)
7588 pCtx->rsp = uNewRsp;
7589
7590 return rc;
7591}
7592
7593
7594/**
7595 * Pushes a dword onto the stack.
7596 *
7597 * @returns Strict VBox status code.
7598 * @param pIemCpu The IEM per CPU data.
7599 * @param u32Value The value to push.
7600 */
7601IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7602{
7603 /* Increment the stack pointer. */
7604 uint64_t uNewRsp;
7605 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7606 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7607
7608 /* Write the dword the lazy way. */
7609 uint32_t *pu32Dst;
7610 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7611 if (rc == VINF_SUCCESS)
7612 {
7613 *pu32Dst = u32Value;
7614 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7615 }
7616
7617 /* Commit the new RSP value unless an access handler made trouble. */
7618 if (rc == VINF_SUCCESS)
7619 pCtx->rsp = uNewRsp;
7620
7621 return rc;
7622}
7623
7624
7625/**
7626 * Pushes a dword segment register value onto the stack.
7627 *
7628 * @returns Strict VBox status code.
7629 * @param pIemCpu The IEM per CPU data.
7630 * @param u32Value The value to push.
7631 */
7632IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7633{
7634 /* Increment the stack pointer. */
7635 uint64_t uNewRsp;
7636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7637 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7638
7639 VBOXSTRICTRC rc;
7640 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7641 {
7642 /* The recompiler writes a full dword. */
7643 uint32_t *pu32Dst;
7644 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7645 if (rc == VINF_SUCCESS)
7646 {
7647 *pu32Dst = u32Value;
7648 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7649 }
7650 }
7651 else
7652 {
7653 /* The Intel docs talk about zero extending the selector register
7654 value. My actual Intel CPU here might be zero extending the value,
7655 but it still only writes the lower word... */
7656 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7657 * happens when crossing an electric page boundrary, is the high word
7658 * checked for write accessibility or not? Probably it is. What about
7659 * segment limits? */
7660 uint16_t *pu16Dst;
7661 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7662 if (rc == VINF_SUCCESS)
7663 {
7664 *pu16Dst = (uint16_t)u32Value;
7665 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7666 }
7667 }
7668
7669 /* Commit the new RSP value unless an access handler made trouble. */
7670 if (rc == VINF_SUCCESS)
7671 pCtx->rsp = uNewRsp;
7672
7673 return rc;
7674}
7675
7676
7677/**
7678 * Pushes a qword onto the stack.
7679 *
7680 * @returns Strict VBox status code.
7681 * @param pIemCpu The IEM per CPU data.
7682 * @param u64Value The value to push.
7683 */
7684IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7685{
7686 /* Increment the stack pointer. */
7687 uint64_t uNewRsp;
7688 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7689 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7690
7691 /* Write the qword the lazy way. */
7692 uint64_t *pu64Dst;
7693 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7694 if (rc == VINF_SUCCESS)
7695 {
7696 *pu64Dst = u64Value;
7697 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7698 }
7699
7700 /* Commit the new RSP value unless an access handler made trouble. */
7701 if (rc == VINF_SUCCESS)
7702 pCtx->rsp = uNewRsp;
7703
7704 return rc;
7705}
7706
7707
7708/**
7709 * Pops a word from the stack.
7710 *
7711 * @returns Strict VBox status code.
7712 * @param pIemCpu The IEM per CPU data.
7713 * @param pu16Value Where to store the popped value.
7714 */
7715IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7716{
7717 /* Increment the stack pointer. */
7718 uint64_t uNewRsp;
7719 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7720 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7721
7722 /* Read the word the lazy way. */
7723 uint16_t const *pu16Src;
7724 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7725 if (rc == VINF_SUCCESS)
7726 {
7727 *pu16Value = *pu16Src;
7728 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7729
7730 /* Commit the new RSP value. */
7731 if (rc == VINF_SUCCESS)
7732 pCtx->rsp = uNewRsp;
7733 }
7734
7735 return rc;
7736}
7737
7738
7739/**
7740 * Pops a dword from the stack.
7741 *
7742 * @returns Strict VBox status code.
7743 * @param pIemCpu The IEM per CPU data.
7744 * @param pu32Value Where to store the popped value.
7745 */
7746IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7747{
7748 /* Increment the stack pointer. */
7749 uint64_t uNewRsp;
7750 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7751 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7752
7753 /* Read the dword the lazy way. */
7754 uint32_t const *pu32Src;
7755 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7756 if (rc == VINF_SUCCESS)
7757 {
7758 *pu32Value = *pu32Src;
7759 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7760
7761 /* Commit the new RSP value. */
7762 if (rc == VINF_SUCCESS)
7763 pCtx->rsp = uNewRsp;
7764 }
7765
7766 return rc;
7767}
7768
7769
7770/**
7771 * Pops a qword from the stack.
7772 *
7773 * @returns Strict VBox status code.
7774 * @param pIemCpu The IEM per CPU data.
7775 * @param pu64Value Where to store the popped value.
7776 */
7777IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7778{
7779 /* Increment the stack pointer. */
7780 uint64_t uNewRsp;
7781 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7782 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7783
7784 /* Read the qword the lazy way. */
7785 uint64_t const *pu64Src;
7786 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7787 if (rc == VINF_SUCCESS)
7788 {
7789 *pu64Value = *pu64Src;
7790 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7791
7792 /* Commit the new RSP value. */
7793 if (rc == VINF_SUCCESS)
7794 pCtx->rsp = uNewRsp;
7795 }
7796
7797 return rc;
7798}
7799
7800
7801/**
7802 * Pushes a word onto the stack, using a temporary stack pointer.
7803 *
7804 * @returns Strict VBox status code.
7805 * @param pIemCpu The IEM per CPU data.
7806 * @param u16Value The value to push.
7807 * @param pTmpRsp Pointer to the temporary stack pointer.
7808 */
7809IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7810{
7811 /* Increment the stack pointer. */
7812 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7813 RTUINT64U NewRsp = *pTmpRsp;
7814 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7815
7816 /* Write the word the lazy way. */
7817 uint16_t *pu16Dst;
7818 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7819 if (rc == VINF_SUCCESS)
7820 {
7821 *pu16Dst = u16Value;
7822 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7823 }
7824
7825 /* Commit the new RSP value unless an access handler made trouble. */
7826 if (rc == VINF_SUCCESS)
7827 *pTmpRsp = NewRsp;
7828
7829 return rc;
7830}
7831
7832
7833/**
7834 * Pushes a dword onto the stack, using a temporary stack pointer.
7835 *
7836 * @returns Strict VBox status code.
7837 * @param pIemCpu The IEM per CPU data.
7838 * @param u32Value The value to push.
7839 * @param pTmpRsp Pointer to the temporary stack pointer.
7840 */
7841IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7842{
7843 /* Increment the stack pointer. */
7844 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7845 RTUINT64U NewRsp = *pTmpRsp;
7846 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7847
7848 /* Write the dword the lazy way. */
7849 uint32_t *pu32Dst;
7850 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7851 if (rc == VINF_SUCCESS)
7852 {
7853 *pu32Dst = u32Value;
7854 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7855 }
7856
7857 /* Commit the new RSP value unless an access handler made trouble. */
7858 if (rc == VINF_SUCCESS)
7859 *pTmpRsp = NewRsp;
7860
7861 return rc;
7862}
7863
7864
7865/**
7866 * Pushes a qword onto the stack, using a temporary stack pointer.
7867 *
7868 * @returns Strict VBox status code.
7869 * @param pIemCpu The IEM per CPU data.
7870 * @param u64Value The value to push.
7871 * @param pTmpRsp Pointer to the temporary stack pointer.
7872 */
7873IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7874{
7875 /* Increment the stack pointer. */
7876 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7877 RTUINT64U NewRsp = *pTmpRsp;
7878 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7879
7880 /* Write the qword the lazy way. */
7881 uint64_t *pu64Dst;
7882 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7883 if (rc == VINF_SUCCESS)
7884 {
7885 *pu64Dst = u64Value;
7886 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7887 }
7888
7889 /* Commit the new RSP value unless an access handler made trouble. */
7890 if (rc == VINF_SUCCESS)
7891 *pTmpRsp = NewRsp;
7892
7893 return rc;
7894}
7895
7896
7897/**
7898 * Pops a word from the stack, using a temporary stack pointer.
7899 *
7900 * @returns Strict VBox status code.
7901 * @param pIemCpu The IEM per CPU data.
7902 * @param pu16Value Where to store the popped value.
7903 * @param pTmpRsp Pointer to the temporary stack pointer.
7904 */
7905IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7906{
7907 /* Increment the stack pointer. */
7908 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7909 RTUINT64U NewRsp = *pTmpRsp;
7910 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7911
7912 /* Read the word the lazy way. */
7913 uint16_t const *pu16Src;
7914 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7915 if (rc == VINF_SUCCESS)
7916 {
7917 *pu16Value = *pu16Src;
7918 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7919
7920 /* Commit the new RSP value. */
7921 if (rc == VINF_SUCCESS)
7922 *pTmpRsp = NewRsp;
7923 }
7924
7925 return rc;
7926}
7927
7928
7929/**
7930 * Pops a dword from the stack, using a temporary stack pointer.
7931 *
7932 * @returns Strict VBox status code.
7933 * @param pIemCpu The IEM per CPU data.
7934 * @param pu32Value Where to store the popped value.
7935 * @param pTmpRsp Pointer to the temporary stack pointer.
7936 */
7937IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7938{
7939 /* Increment the stack pointer. */
7940 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7941 RTUINT64U NewRsp = *pTmpRsp;
7942 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7943
7944 /* Read the dword the lazy way. */
7945 uint32_t const *pu32Src;
7946 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7947 if (rc == VINF_SUCCESS)
7948 {
7949 *pu32Value = *pu32Src;
7950 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7951
7952 /* Commit the new RSP value. */
7953 if (rc == VINF_SUCCESS)
7954 *pTmpRsp = NewRsp;
7955 }
7956
7957 return rc;
7958}
7959
7960
7961/**
7962 * Pops a qword from the stack, using a temporary stack pointer.
7963 *
7964 * @returns Strict VBox status code.
7965 * @param pIemCpu The IEM per CPU data.
7966 * @param pu64Value Where to store the popped value.
7967 * @param pTmpRsp Pointer to the temporary stack pointer.
7968 */
7969IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7970{
7971 /* Increment the stack pointer. */
7972 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7973 RTUINT64U NewRsp = *pTmpRsp;
7974 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7975
7976 /* Read the qword the lazy way. */
7977 uint64_t const *pu64Src;
7978 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7979 if (rcStrict == VINF_SUCCESS)
7980 {
7981 *pu64Value = *pu64Src;
7982 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7983
7984 /* Commit the new RSP value. */
7985 if (rcStrict == VINF_SUCCESS)
7986 *pTmpRsp = NewRsp;
7987 }
7988
7989 return rcStrict;
7990}
7991
7992
7993/**
7994 * Begins a special stack push (used by interrupts, exceptions and such).
7995 *
7996 * This will raise \#SS or \#PF if appropriate.
7997 *
7998 * @returns Strict VBox status code.
7999 * @param pIemCpu The IEM per CPU data.
8000 * @param cbMem The number of bytes to push onto the stack.
8001 * @param ppvMem Where to return the pointer to the stack memory.
8002 * As with the other memory functions this could be
8003 * direct access or bounce buffered access, so
8004 * don't commit register until the commit call
8005 * succeeds.
8006 * @param puNewRsp Where to return the new RSP value. This must be
8007 * passed unchanged to
8008 * iemMemStackPushCommitSpecial().
8009 */
8010IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8011{
8012 Assert(cbMem < UINT8_MAX);
8013 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8014 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8015 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8016}
8017
8018
8019/**
8020 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8021 *
8022 * This will update the rSP.
8023 *
8024 * @returns Strict VBox status code.
8025 * @param pIemCpu The IEM per CPU data.
8026 * @param pvMem The pointer returned by
8027 * iemMemStackPushBeginSpecial().
8028 * @param uNewRsp The new RSP value returned by
8029 * iemMemStackPushBeginSpecial().
8030 */
8031IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8032{
8033 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8034 if (rcStrict == VINF_SUCCESS)
8035 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8036 return rcStrict;
8037}
8038
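
/*
 * Usage sketch: the two helpers above form a begin/commit pair.  The 32-bit
 * frame width and the uErrCode variable below are illustrative assumptions,
 * not taken from any specific caller:
 *
 *      uint32_t    *pu32Frame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 4, (void **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      *pu32Frame = uErrCode;                                                 // fill in the frame while it is mapped
 *      rcStrict   = iemMemStackPushCommitSpecial(pIemCpu, pu32Frame, uNewRsp); // updates RSP only on success
 */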
8039
8040/**
8041 * Begins a special stack pop (used by iret, retf and such).
8042 *
8043 * This will raise \#SS or \#PF if appropriate.
8044 *
8045 * @returns Strict VBox status code.
8046 * @param pIemCpu The IEM per CPU data.
8047 * @param   cbMem               The number of bytes to pop off the stack.
8048 * @param ppvMem Where to return the pointer to the stack memory.
8049 * @param puNewRsp Where to return the new RSP value. This must be
8050 * passed unchanged to
8051 * iemMemStackPopCommitSpecial() or applied
8052 * manually if iemMemStackPopDoneSpecial() is used.
8053 */
8054IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8055{
8056 Assert(cbMem < UINT8_MAX);
8057 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8058 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8059 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8060}
8061
8062
8063/**
8064 * Continues a special stack pop (used by iret and retf).
8065 *
8066 * This will raise \#SS or \#PF if appropriate.
8067 *
8068 * @returns Strict VBox status code.
8069 * @param pIemCpu The IEM per CPU data.
8070 * @param   cbMem               The number of bytes to pop off the stack.
8071 * @param ppvMem Where to return the pointer to the stack memory.
8072 * @param puNewRsp Where to return the new RSP value. This must be
8073 * passed unchanged to
8074 * iemMemStackPopCommitSpecial() or applied
8075 * manually if iemMemStackPopDoneSpecial() is used.
8076 */
8077IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8078{
8079 Assert(cbMem < UINT8_MAX);
8080 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8081 RTUINT64U NewRsp;
8082 NewRsp.u = *puNewRsp;
8083 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8084 *puNewRsp = NewRsp.u;
8085 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8086}
8087
8088
8089/**
8090 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8091 *
8092 * This will update the rSP.
8093 *
8094 * @returns Strict VBox status code.
8095 * @param pIemCpu The IEM per CPU data.
8096 * @param pvMem The pointer returned by
8097 * iemMemStackPopBeginSpecial().
8098 * @param uNewRsp The new RSP value returned by
8099 * iemMemStackPopBeginSpecial().
8100 */
8101IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8102{
8103 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8104 if (rcStrict == VINF_SUCCESS)
8105 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8106 return rcStrict;
8107}
8108
8109
8110/**
8111 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8112 * iemMemStackPopContinueSpecial).
8113 *
8114 * The caller will manually commit the rSP.
8115 *
8116 * @returns Strict VBox status code.
8117 * @param pIemCpu The IEM per CPU data.
8118 * @param pvMem The pointer returned by
8119 * iemMemStackPopBeginSpecial() or
8120 * iemMemStackPopContinueSpecial().
8121 */
8122IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8123{
8124 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8125}
8126
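
/*
 * Usage sketch: the special pop helpers above pair up roughly like this.  The
 * 32-bit iret-style frame of EIP, CS and EFLAGS is an illustrative assumption:
 *
 *      uint32_t const *pu32Frame;
 *      uint64_t        uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, (void const **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t uNewEip   = pu32Frame[0];
 *      uint16_t uNewCs    = (uint16_t)pu32Frame[1];
 *      uint32_t uNewFlags = pu32Frame[2];
 *      rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu32Frame, uNewRsp);
 *
 * When the new RSP cannot simply be committed (e.g. when switching stacks),
 * the caller uses iemMemStackPopDoneSpecial() instead and writes rsp itself.
 */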
8127
8128/**
8129 * Fetches a system table byte.
8130 *
8131 * @returns Strict VBox status code.
8132 * @param pIemCpu The IEM per CPU data.
8133 * @param pbDst Where to return the byte.
8134 * @param iSegReg The index of the segment register to use for
8135 * this access. The base and limits are checked.
8136 * @param GCPtrMem The address of the guest memory.
8137 */
8138IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8139{
8140 /* The lazy approach for now... */
8141 uint8_t const *pbSrc;
8142 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8143 if (rc == VINF_SUCCESS)
8144 {
8145 *pbDst = *pbSrc;
8146 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8147 }
8148 return rc;
8149}
8150
8151
8152/**
8153 * Fetches a system table word.
8154 *
8155 * @returns Strict VBox status code.
8156 * @param pIemCpu The IEM per CPU data.
8157 * @param pu16Dst Where to return the word.
8158 * @param iSegReg The index of the segment register to use for
8159 * this access. The base and limits are checked.
8160 * @param GCPtrMem The address of the guest memory.
8161 */
8162IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8163{
8164 /* The lazy approach for now... */
8165 uint16_t const *pu16Src;
8166 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8167 if (rc == VINF_SUCCESS)
8168 {
8169 *pu16Dst = *pu16Src;
8170 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8171 }
8172 return rc;
8173}
8174
8175
8176/**
8177 * Fetches a system table dword.
8178 *
8179 * @returns Strict VBox status code.
8180 * @param pIemCpu The IEM per CPU data.
8181 * @param pu32Dst Where to return the dword.
8182 * @param iSegReg The index of the segment register to use for
8183 * this access. The base and limits are checked.
8184 * @param GCPtrMem The address of the guest memory.
8185 */
8186IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8187{
8188 /* The lazy approach for now... */
8189 uint32_t const *pu32Src;
8190 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8191 if (rc == VINF_SUCCESS)
8192 {
8193 *pu32Dst = *pu32Src;
8194 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8195 }
8196 return rc;
8197}
8198
8199
8200/**
8201 * Fetches a system table qword.
8202 *
8203 * @returns Strict VBox status code.
8204 * @param pIemCpu The IEM per CPU data.
8205 * @param pu64Dst Where to return the qword.
8206 * @param iSegReg The index of the segment register to use for
8207 * this access. The base and limits are checked.
8208 * @param GCPtrMem The address of the guest memory.
8209 */
8210IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8211{
8212 /* The lazy approach for now... */
8213 uint64_t const *pu64Src;
8214 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8215 if (rc == VINF_SUCCESS)
8216 {
8217 *pu64Dst = *pu64Src;
8218 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8219 }
8220 return rc;
8221}
8222
8223
8224/**
8225 * Fetches a descriptor table entry with caller specified error code.
8226 *
8227 * @returns Strict VBox status code.
8228 * @param pIemCpu The IEM per CPU.
8229 * @param pDesc Where to return the descriptor table entry.
8230 * @param uSel The selector which table entry to fetch.
8231 * @param uXcpt The exception to raise on table lookup error.
8232 * @param uErrorCode The error code associated with the exception.
8233 */
8234IEM_STATIC VBOXSTRICTRC
8235iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8236{
8237 AssertPtr(pDesc);
8238 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8239
8240 /** @todo did the 286 require all 8 bytes to be accessible? */
8241 /*
8242 * Get the selector table base and check bounds.
8243 */
8244 RTGCPTR GCPtrBase;
8245 if (uSel & X86_SEL_LDT)
8246 {
8247 if ( !pCtx->ldtr.Attr.n.u1Present
8248 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8249 {
8250 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8251 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8252 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8253 uErrorCode, 0);
8254 }
8255
8256 Assert(pCtx->ldtr.Attr.n.u1Present);
8257 GCPtrBase = pCtx->ldtr.u64Base;
8258 }
8259 else
8260 {
8261 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8262 {
8263 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8264 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8265 uErrorCode, 0);
8266 }
8267 GCPtrBase = pCtx->gdtr.pGdt;
8268 }
8269
8270 /*
8271 * Read the legacy descriptor and maybe the long mode extensions if
8272 * required.
8273 */
8274 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8275 if (rcStrict == VINF_SUCCESS)
8276 {
8277 if ( !IEM_IS_LONG_MODE(pIemCpu)
8278 || pDesc->Legacy.Gen.u1DescType)
8279 pDesc->Long.au64[1] = 0;
8280 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8281 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8282 else
8283 {
8284 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8285 /** @todo is this the right exception? */
8286 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8287 }
8288 }
8289 return rcStrict;
8290}
8291
8292
8293/**
8294 * Fetches a descriptor table entry.
8295 *
8296 * @returns Strict VBox status code.
8297 * @param pIemCpu The IEM per CPU.
8298 * @param pDesc Where to return the descriptor table entry.
8299 * @param uSel The selector which table entry to fetch.
8300 * @param uXcpt The exception to raise on table lookup error.
8301 */
8302IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8303{
8304 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8305}
8306
8307
8308/**
8309 * Fakes a long mode stack selector for SS = 0.
8310 *
8311 * @param pDescSs Where to return the fake stack descriptor.
8312 * @param uDpl The DPL we want.
8313 */
8314IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8315{
8316 pDescSs->Long.au64[0] = 0;
8317 pDescSs->Long.au64[1] = 0;
8318 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8319 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8320 pDescSs->Long.Gen.u2Dpl = uDpl;
8321 pDescSs->Long.Gen.u1Present = 1;
8322 pDescSs->Long.Gen.u1Long = 1;
8323}
8324
8325
8326/**
8327 * Marks the selector descriptor as accessed (only non-system descriptors).
8328 *
8329 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8330 * will therefore skip the limit checks.
8331 *
8332 * @returns Strict VBox status code.
8333 * @param pIemCpu The IEM per CPU.
8334 * @param uSel The selector.
8335 */
8336IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8337{
8338 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8339
8340 /*
8341 * Get the selector table base and calculate the entry address.
8342 */
8343 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8344 ? pCtx->ldtr.u64Base
8345 : pCtx->gdtr.pGdt;
8346 GCPtr += uSel & X86_SEL_MASK;
8347
8348 /*
8349 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8350 * ugly stuff to avoid this. This will make sure it's an atomic access
8351 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8352 */
8353 VBOXSTRICTRC rcStrict;
8354 uint32_t volatile *pu32;
8355 if ((GCPtr & 3) == 0)
8356 {
8357        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8358 GCPtr += 2 + 2;
8359 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8360 if (rcStrict != VINF_SUCCESS)
8361 return rcStrict;
8362        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8363 }
8364 else
8365 {
8366 /* The misaligned GDT/LDT case, map the whole thing. */
8367 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8368 if (rcStrict != VINF_SUCCESS)
8369 return rcStrict;
8370 switch ((uintptr_t)pu32 & 3)
8371 {
8372 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8373 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8374 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8375 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8376 }
8377 }
8378
8379 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8380}
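
/*
 * Worked example for the misaligned case above (illustrative): if the mapped
 * descriptor starts at a host address with (uintptr_t)pu32 & 3 == 1, then
 * pu32 + 3 is dword aligned and bit 40 - 24 = 16 of that dword lands on byte
 * 3 + 16/8 = 5 of the descriptor, bit 0, i.e. exactly the
 * X86_SEL_TYPE_ACCESSED bit that case 1 sets.
 */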
8381
8382/** @} */
8383
8384
8385/*
8386 * Include the C/C++ implementation of instructions.
8387 */
8388#include "IEMAllCImpl.cpp.h"
8389
8390
8391
8392/** @name "Microcode" macros.
8393 *
8394 * The idea is that we should be able to use the same code to interpret
8395 * instructions as well as to generate recompiler code. Thus this obfuscation.
8396 *
8397 * @{
8398 */
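
/*
 * Usage sketch: instruction bodies are written purely in terms of the
 * IEM_MC_* macros below so that the very same body can later be fed to a
 * recompiler.  A register-form INC-like instruction might look roughly like
 * this (the iemAImpl_inc_u16 worker and the bRm decoding shown are
 * illustrative assumptions):
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *      IEM_MC_ARG(uint32_t *, pEFlags, 1);
 *      IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_inc_u16, pu16Dst, pEFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */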
8399#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8400#define IEM_MC_END() }
8401#define IEM_MC_PAUSE() do {} while (0)
8402#define IEM_MC_CONTINUE() do {} while (0)
8403
8404/** Internal macro. */
8405#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8406 do \
8407 { \
8408 VBOXSTRICTRC rcStrict2 = a_Expr; \
8409 if (rcStrict2 != VINF_SUCCESS) \
8410 return rcStrict2; \
8411 } while (0)
8412
8413#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8414#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8415#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8416#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8417#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8418#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8419#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8420
8421#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8422#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8423 do { \
8424 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8425 return iemRaiseDeviceNotAvailable(pIemCpu); \
8426 } while (0)
8427#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8428 do { \
8429 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8430 return iemRaiseMathFault(pIemCpu); \
8431 } while (0)
8432#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8433 do { \
8434 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8435 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8436 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8437 return iemRaiseUndefinedOpcode(pIemCpu); \
8438 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8439 return iemRaiseDeviceNotAvailable(pIemCpu); \
8440 } while (0)
8441#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8442 do { \
8443 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8444 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8445 return iemRaiseUndefinedOpcode(pIemCpu); \
8446 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8447 return iemRaiseDeviceNotAvailable(pIemCpu); \
8448 } while (0)
8449#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8450 do { \
8451 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8452 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8453 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8454 return iemRaiseUndefinedOpcode(pIemCpu); \
8455 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8456 return iemRaiseDeviceNotAvailable(pIemCpu); \
8457 } while (0)
8458#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8459 do { \
8460 if (pIemCpu->uCpl != 0) \
8461 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8462 } while (0)
8463
8464
8465#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8466#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8467#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8468#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8469#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8470#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8471#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8472 uint32_t a_Name; \
8473 uint32_t *a_pName = &a_Name
8474#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8475 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8476
8477#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8478#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8479
8480#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8481#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8482#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8483#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8484#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8485#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8486#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8487#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8488#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8489#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8490#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8491#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8492#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8493#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8494#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8495#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8496#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8497#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8498#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8499#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8500#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8501#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8502#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8503#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8504#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8505#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8506#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8507#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8508#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8509/** @note Not for IOPL or IF testing or modification. */
8510#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8511#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8512#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8513#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8514
8515#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8516#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8517#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8518#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8519#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8520#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8521#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8522#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8523#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8524#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8525#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8526 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8527
8528#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8529#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8530/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8531 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8532#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8533#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8534/** @note Not for IOPL or IF testing or modification. */
8535#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8536
8537#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8538#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8539#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8540 do { \
8541 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8542 *pu32Reg += (a_u32Value); \
8543        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8544 } while (0)
8545#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8546
8547#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8548#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8549#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8550 do { \
8551 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8552 *pu32Reg -= (a_u32Value); \
8553        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8554 } while (0)
8555#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8556
8557#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8558#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8559#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8560#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8561#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8562#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8563#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8564
8565#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8566#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8567#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8568#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8569
8570#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8571#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8572#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8573
8574#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8575#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8576
8577#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8578#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8579#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8580
8581#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8582#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8583#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8584
8585#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8586
8587#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8588
8589#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8590#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8591#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8592 do { \
8593 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8594 *pu32Reg &= (a_u32Value); \
8595        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8596 } while (0)
8597#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8598
8599#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8600#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8601#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8602 do { \
8603 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8604 *pu32Reg |= (a_u32Value); \
8605        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8606 } while (0)
8607#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8608
8609
8610/** @note Not for IOPL or IF modification. */
8611#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8612/** @note Not for IOPL or IF modification. */
8613#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8614/** @note Not for IOPL or IF modification. */
8615#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8616
8617#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8618
8619
8620#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8621 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8622#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8623 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8624#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8625 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8626#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8627 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8628#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8629 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8630#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8631 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8632#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8633 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8634
8635#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8636 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8637#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8638 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8639#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8640 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8641#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8642 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8643#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8644 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8645 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8646 } while (0)
8647#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8648 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8649 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8650 } while (0)
8651#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8652 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8653#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8654 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8655#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8656 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8657
8658#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8659 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8660#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8662#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8664
8665#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8666 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8667#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8669#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8670 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8671
8672#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8673 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8674#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8676#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8678
8679#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8680 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8681
8682#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8684#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8685 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8686#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8688#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8690
8691#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8693#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8695#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8696 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8697
8698#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8700#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8702
8703
8704
8705#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8706 do { \
8707 uint8_t u8Tmp; \
8708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8709 (a_u16Dst) = u8Tmp; \
8710 } while (0)
8711#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8712 do { \
8713 uint8_t u8Tmp; \
8714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8715 (a_u32Dst) = u8Tmp; \
8716 } while (0)
8717#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8718 do { \
8719 uint8_t u8Tmp; \
8720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8721 (a_u64Dst) = u8Tmp; \
8722 } while (0)
8723#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8724 do { \
8725 uint16_t u16Tmp; \
8726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8727 (a_u32Dst) = u16Tmp; \
8728 } while (0)
8729#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8730 do { \
8731 uint16_t u16Tmp; \
8732 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8733 (a_u64Dst) = u16Tmp; \
8734 } while (0)
8735#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8736 do { \
8737 uint32_t u32Tmp; \
8738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8739 (a_u64Dst) = u32Tmp; \
8740 } while (0)
8741
8742#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8743 do { \
8744 uint8_t u8Tmp; \
8745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8746 (a_u16Dst) = (int8_t)u8Tmp; \
8747 } while (0)
8748#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8749 do { \
8750 uint8_t u8Tmp; \
8751 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8752 (a_u32Dst) = (int8_t)u8Tmp; \
8753 } while (0)
8754#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8755 do { \
8756 uint8_t u8Tmp; \
8757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8758 (a_u64Dst) = (int8_t)u8Tmp; \
8759 } while (0)
8760#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8761 do { \
8762 uint16_t u16Tmp; \
8763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8764 (a_u32Dst) = (int16_t)u16Tmp; \
8765 } while (0)
8766#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8767 do { \
8768 uint16_t u16Tmp; \
8769 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8770 (a_u64Dst) = (int16_t)u16Tmp; \
8771 } while (0)
8772#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8773 do { \
8774 uint32_t u32Tmp; \
8775 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8776 (a_u64Dst) = (int32_t)u32Tmp; \
8777 } while (0)
8778
8779#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8780 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8781#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8782 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8783#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8784 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8785#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8786 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8787
8788#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8789 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8790#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8791 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8792#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8793 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8794#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8795 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8796
8797#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8798#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8799#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8800#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8801#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8802#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8803#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8804 do { \
8805 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8806 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8807 } while (0)
8808
8809#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8810 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8811#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8812 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8813
8814
8815#define IEM_MC_PUSH_U16(a_u16Value) \
8816 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8817#define IEM_MC_PUSH_U32(a_u32Value) \
8818 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8819#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8820 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8821#define IEM_MC_PUSH_U64(a_u64Value) \
8822 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8823
8824#define IEM_MC_POP_U16(a_pu16Value) \
8825 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8826#define IEM_MC_POP_U32(a_pu32Value) \
8827 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8828#define IEM_MC_POP_U64(a_pu64Value) \
8829 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8830
8831/** Maps guest memory for direct or bounce buffered access.
8832 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8833 * @remarks May return.
8834 */
8835#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8836 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8837
8838/** Maps guest memory for direct or bounce buffered access.
8839 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8840 * @remarks May return.
8841 */
8842#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8843 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8844
8845/** Commits the memory and unmaps the guest memory.
8846 * @remarks May return.
8847 */
8848#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8849 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8850
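
/*
 * Usage sketch: a read-modify-write memory operand maps the guest memory,
 * hands the pointer to a worker and then commits.  The effective address
 * calculation (macro defined a bit further down) and the iemAImpl_not_u32
 * worker are illustrative assumptions:
 *
 *      IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *      IEM_MC_ARG(uint32_t *, pEFlags, 1);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_not_u32, pu32Dst, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_ADVANCE_RIP();
 */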
8851/** Commits the memory and unmaps the guest memory unless the FPU status word
8852 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8853 * would prevent the store from taking place.
8854 *
8855 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8856 * store, while \#P will not.
8857 *
8858 * @remarks May in theory return - for now.
8859 */
8860#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8861 do { \
8862 if ( !(a_u16FSW & X86_FSW_ES) \
8863 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8864 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8865 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8866 } while (0)
8867
8868/** Calculate the effective address from R/M. */
8869#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8870 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8871
8872#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8873#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8874#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8875#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8876#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8877#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8878#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8879
8880/**
8881 * Defers the rest of the instruction emulation to a C implementation routine
8882 * and returns, only taking the standard parameters.
8883 *
8884 * @param a_pfnCImpl The pointer to the C routine.
8885 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8886 */
8887#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8888
8889/**
8890 * Defers the rest of instruction emulation to a C implementation routine and
8891 * returns, taking one argument in addition to the standard ones.
8892 *
8893 * @param a_pfnCImpl The pointer to the C routine.
8894 * @param a0 The argument.
8895 */
8896#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8897
8898/**
8899 * Defers the rest of the instruction emulation to a C implementation routine
8900 * and returns, taking two arguments in addition to the standard ones.
8901 *
8902 * @param a_pfnCImpl The pointer to the C routine.
8903 * @param a0 The first extra argument.
8904 * @param a1 The second extra argument.
8905 */
8906#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8907
8908/**
8909 * Defers the rest of the instruction emulation to a C implementation routine
8910 * and returns, taking three arguments in addition to the standard ones.
8911 *
8912 * @param a_pfnCImpl The pointer to the C routine.
8913 * @param a0 The first extra argument.
8914 * @param a1 The second extra argument.
8915 * @param a2 The third extra argument.
8916 */
8917#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8918
8919/**
8920 * Defers the rest of the instruction emulation to a C implementation routine
8921 * and returns, taking four arguments in addition to the standard ones.
8922 *
8923 * @param a_pfnCImpl The pointer to the C routine.
8924 * @param a0 The first extra argument.
8925 * @param a1 The second extra argument.
8926 * @param a2 The third extra argument.
8927 * @param a3 The fourth extra argument.
8928 */
8929#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8930
8931/**
8932 * Defers the rest of the instruction emulation to a C implementation routine
8933 * and returns, taking five arguments in addition to the standard ones.
8934 *
8935 * @param a_pfnCImpl The pointer to the C routine.
8936 * @param a0 The first extra argument.
8937 * @param a1 The second extra argument.
8938 * @param a2 The third extra argument.
8939 * @param a3 The fourth extra argument.
8940 * @param a4 The fifth extra argument.
8941 */
8942#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8943
8944/**
8945 * Defers the entire instruction emulation to a C implementation routine and
8946 * returns, only taking the standard parameters.
8947 *
8948 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8949 *
8950 * @param a_pfnCImpl The pointer to the C routine.
8951 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8952 */
8953#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8954
8955/**
8956 * Defers the entire instruction emulation to a C implementation routine and
8957 * returns, taking one argument in addition to the standard ones.
8958 *
8959 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8960 *
8961 * @param a_pfnCImpl The pointer to the C routine.
8962 * @param a0 The argument.
8963 */
8964#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8965
8966/**
8967 * Defers the entire instruction emulation to a C implementation routine and
8968 * returns, taking two arguments in addition to the standard ones.
8969 *
8970 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8971 *
8972 * @param a_pfnCImpl The pointer to the C routine.
8973 * @param a0 The first extra argument.
8974 * @param a1 The second extra argument.
8975 */
8976#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8977
8978/**
8979 * Defers the entire instruction emulation to a C implementation routine and
8980 * returns, taking three arguments in addition to the standard ones.
8981 *
8982 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8983 *
8984 * @param a_pfnCImpl The pointer to the C routine.
8985 * @param a0 The first extra argument.
8986 * @param a1 The second extra argument.
8987 * @param a2 The third extra argument.
8988 */
8989#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8990
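
/*
 * Usage sketch: contrast between the CALL and DEFER forms above.  The CIMPL
 * call variants sit inside an IEM_MC_BEGIN/IEM_MC_END body (and return from
 * it), while the defer variants replace the whole body.  The handler names
 * used here are illustrative assumptions:
 *
 *      // inside a decoded form, after operand fetching:
 *      IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
 *
 *      // the whole instruction handed off, no IEM_MC_BEGIN/END around it:
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 */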
8991/**
8992 * Calls a FPU assembly implementation taking one visible argument.
8993 *
8994 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8995 * @param a0 The first extra argument.
8996 */
8997#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
8998 do { \
8999 iemFpuPrepareUsage(pIemCpu); \
9000 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9001 } while (0)
9002
9003/**
9004 * Calls a FPU assembly implementation taking two visible arguments.
9005 *
9006 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9007 * @param a0 The first extra argument.
9008 * @param a1 The second extra argument.
9009 */
9010#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9011 do { \
9012 iemFpuPrepareUsage(pIemCpu); \
9013 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9014 } while (0)
9015
9016/**
9017 * Calls a FPU assembly implementation taking three visible arguments.
9018 *
9019 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9020 * @param a0 The first extra argument.
9021 * @param a1 The second extra argument.
9022 * @param a2 The third extra argument.
9023 */
9024#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9025 do { \
9026 iemFpuPrepareUsage(pIemCpu); \
9027 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9028 } while (0)
9029
9030#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9031 do { \
9032 (a_FpuData).FSW = (a_FSW); \
9033 (a_FpuData).r80Result = *(a_pr80Value); \
9034 } while (0)
9035
9036/** Pushes FPU result onto the stack. */
9037#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9038 iemFpuPushResult(pIemCpu, &a_FpuData)
9039/** Pushes FPU result onto the stack and sets the FPUDP. */
9040#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9041 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9042
9043/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
9044#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9045 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9046
9047/** Stores FPU result in a stack register. */
9048#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9049 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9050/** Stores FPU result in a stack register and pops the stack. */
9051#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9052 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9053/** Stores FPU result in a stack register and sets the FPUDP. */
9054#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9055 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9056/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9057 * stack. */
9058#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9059 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9060
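
/*
 * Usage sketch: a unary FPU instruction typically pairs one of the AIMPL call
 * macros above with a result macro and a stack-register check.  The
 * IEM_MC_IF/ELSE/ENDIF macros appear further down in this file, and the
 * iemAImpl_fsqrt_r80 worker name is an illustrative assumption:
 *
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,       1);
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
 *          IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fsqrt_r80, pFpuRes, pr80Value);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 */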
9061/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9062#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9063 iemFpuUpdateOpcodeAndIp(pIemCpu)
9064/** Free a stack register (for FFREE and FFREEP). */
9065#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9066 iemFpuStackFree(pIemCpu, a_iStReg)
9067/** Increment the FPU stack pointer. */
9068#define IEM_MC_FPU_STACK_INC_TOP() \
9069 iemFpuStackIncTop(pIemCpu)
9070/** Decrement the FPU stack pointer. */
9071#define IEM_MC_FPU_STACK_DEC_TOP() \
9072 iemFpuStackDecTop(pIemCpu)
9073
9074/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9075#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9076 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9077/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9078#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9079 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9080/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9081#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9082 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9083/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9084#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9085 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9086/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9087 * stack. */
9088#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9089 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9090/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9091#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9092    iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9093
9094/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9095#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9096 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9097/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9098 * stack. */
9099#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9100 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9101/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9102 * FPUDS. */
9103#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9104 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9105/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9106 * FPUDS. Pops stack. */
9107#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9108 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9109/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9110 * stack twice. */
9111#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9112 iemFpuStackUnderflowThenPopPop(pIemCpu)
9113/** Raises a FPU stack underflow exception for an instruction pushing a result
9114 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9115#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9116 iemFpuStackPushUnderflow(pIemCpu)
9117/** Raises a FPU stack underflow exception for an instruction pushing a result
9118 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9119#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9120 iemFpuStackPushUnderflowTwo(pIemCpu)
9121
9122/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9123 * FPUIP, FPUCS and FOP. */
9124#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9125 iemFpuStackPushOverflow(pIemCpu)
9126/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9127 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9128#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9129 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9130/** Indicates that we (might) have modified the FPU state. */
9131#define IEM_MC_USED_FPU() \
9132 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9133
9134/**
9135 * Calls a MMX assembly implementation taking two visible arguments.
9136 *
9137 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9138 * @param a0 The first extra argument.
9139 * @param a1 The second extra argument.
9140 */
9141#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9142 do { \
9143 iemFpuPrepareUsage(pIemCpu); \
9144 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9145 } while (0)
9146
9147/**
9148 * Calls a MMX assembly implementation taking three visible arguments.
9149 *
9150 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9151 * @param a0 The first extra argument.
9152 * @param a1 The second extra argument.
9153 * @param a2 The third extra argument.
9154 */
9155#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9156 do { \
9157 iemFpuPrepareUsage(pIemCpu); \
9158 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9159 } while (0)
9160
9161
9162/**
9163 * Calls a SSE assembly implementation taking two visible arguments.
9164 *
9165 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9166 * @param a0 The first extra argument.
9167 * @param a1 The second extra argument.
9168 */
9169#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9170 do { \
9171 iemFpuPrepareUsageSse(pIemCpu); \
9172 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9173 } while (0)
9174
9175/**
9176 * Calls a SSE assembly implementation taking three visible arguments.
9177 *
9178 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9179 * @param a0 The first extra argument.
9180 * @param a1 The second extra argument.
9181 * @param a2 The third extra argument.
9182 */
9183#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9184 do { \
9185 iemFpuPrepareUsageSse(pIemCpu); \
9186 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9187 } while (0)
9188
9189
9190/** @note Not for IOPL or IF testing. */
9191#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9192/** @note Not for IOPL or IF testing. */
9193#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9194/** @note Not for IOPL or IF testing. */
9195#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9196/** @note Not for IOPL or IF testing. */
9197#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9198/** @note Not for IOPL or IF testing. */
9199#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9200 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9201 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9202/** @note Not for IOPL or IF testing. */
9203#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9204 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9205 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9206/** @note Not for IOPL or IF testing. */
9207#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9208 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9209 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9210 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9211/** @note Not for IOPL or IF testing. */
9212#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9213 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9214 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9215 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9216#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9217#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9218#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9219/** @note Not for IOPL or IF testing. */
9220#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9221 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9222 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9223/** @note Not for IOPL or IF testing. */
9224#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9225 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9226 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9227/** @note Not for IOPL or IF testing. */
9228#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9229 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9230 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9231/** @note Not for IOPL or IF testing. */
9232#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9233 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9234 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9235/** @note Not for IOPL or IF testing. */
9236#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9237 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9238 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9239/** @note Not for IOPL or IF testing. */
9240#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9241 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9242 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9243#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9244#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9245#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9246 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9247#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9248 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9249#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9250 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9251#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9252 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9253#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9254 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9255#define IEM_MC_IF_FCW_IM() \
9256 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9257
9258#define IEM_MC_ELSE() } else {
9259#define IEM_MC_ENDIF() } do {} while (0)
9260
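/*
 * Illustrative sketch only (an assumed example, not quoted from the opcode
 * tables): a conditional-branch style instruction would typically combine
 * the IF/ELSE/ENDIF macros above with the jump/advance MC statements:
 */
#if 0
FNIEMOP_DEF(iemOp_example_jz_Jb)
{
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);   /* condition met: take the relative jump */
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();       /* condition not met: fall through */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif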
9261/** @} */
9262
9263
9264/** @name Opcode Debug Helpers.
9265 * @{
9266 */
9267#ifdef DEBUG
9268# define IEMOP_MNEMONIC(a_szMnemonic) \
9269 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9270 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9271# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9272 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9273 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9274#else
9275# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9276# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9277#endif
9278
9279/** @} */
9280
9281
9282/** @name Opcode Helpers.
9283 * @{
9284 */
9285
9286/** The instruction raises an \#UD in real and V8086 mode. */
9287#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9288 do \
9289 { \
9290 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9291 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9292 } while (0)
9293
9294/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9295 * lock prefixed.
9296 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9297#define IEMOP_HLP_NO_LOCK_PREFIX() \
9298 do \
9299 { \
9300 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9301 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9302 } while (0)
9303
9304/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9305 * 64-bit mode. */
9306#define IEMOP_HLP_NO_64BIT() \
9307 do \
9308 { \
9309 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9310 return IEMOP_RAISE_INVALID_OPCODE(); \
9311 } while (0)
9312
9313/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9314 * 64-bit mode. */
9315#define IEMOP_HLP_ONLY_64BIT() \
9316 do \
9317 { \
9318 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9319 return IEMOP_RAISE_INVALID_OPCODE(); \
9320 } while (0)
9321
9322/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9323#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9324 do \
9325 { \
9326 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9327 iemRecalEffOpSize64Default(pIemCpu); \
9328 } while (0)
9329
9330/** The instruction has 64-bit operand size if 64-bit mode. */
9331#define IEMOP_HLP_64BIT_OP_SIZE() \
9332 do \
9333 { \
9334 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9335 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9336 } while (0)
9337
9338/** Only a REX prefix immediately preceding the first opcode byte takes
9339 * effect. This macro helps ensure this as well as logging bad guest code.
9340#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9341 do \
9342 { \
9343 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9344 { \
9345 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9346 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9347 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9348 pIemCpu->uRexB = 0; \
9349 pIemCpu->uRexIndex = 0; \
9350 pIemCpu->uRexReg = 0; \
9351 iemRecalEffOpSize(pIemCpu); \
9352 } \
9353 } while (0)
9354
9355/**
9356 * Done decoding.
9357 */
9358#define IEMOP_HLP_DONE_DECODING() \
9359 do \
9360 { \
9361 /*nothing for now, maybe later... */ \
9362 } while (0)
9363
9364/**
9365 * Done decoding, raise \#UD exception if lock prefix present.
9366 */
9367#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9368 do \
9369 { \
9370 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9371 { /* likely */ } \
9372 else \
9373 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9374 } while (0)
9375#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9376 do \
9377 { \
9378 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9379 { /* likely */ } \
9380 else \
9381 { \
9382 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9383 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9384 } \
9385 } while (0)
9386#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9387 do \
9388 { \
9389 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9390 { /* likely */ } \
9391 else \
9392 { \
9393 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9394 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9395 } \
9396 } while (0)
9397/**
9398 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9399 * are present.
9400 */
9401#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9402 do \
9403 { \
9404 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9405 { /* likely */ } \
9406 else \
9407 return IEMOP_RAISE_INVALID_OPCODE(); \
9408 } while (0)
9409
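/*
 * Illustrative sketch only (an assumed example): a typical decoder function
 * strings the helpers above together before emitting its MC block, e.g. for
 * an instruction that is invalid in long mode and rejects the lock prefix:
 */
#if 0
FNIEMOP_DEF(iemOp_example)
{
    IEMOP_MNEMONIC("example");
    IEMOP_HLP_NO_64BIT();                       /* raises #UD in 64-bit mode */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();   /* raises #UD if lock prefixed */
    IEM_MC_BEGIN(0, 0);
    IEM_MC_ADVANCE_RIP();                       /* trivial body: just advance RIP */
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif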
9410
9411/**
9412 * Calculates the effective address of a ModR/M memory operand.
9413 *
9414 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9415 *
9416 * @return Strict VBox status code.
9417 * @param pIemCpu The IEM per CPU data.
9418 * @param bRm The ModRM byte.
9419 * @param cbImm The size of any immediate following the
9420 * effective address opcode bytes. Important for
9421 * RIP relative addressing.
9422 * @param pGCPtrEff Where to return the effective address.
9423 */
9424IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9425{
9426 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9427 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9428#define SET_SS_DEF() \
9429 do \
9430 { \
9431 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9432 pIemCpu->iEffSeg = X86_SREG_SS; \
9433 } while (0)
9434
9435 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9436 {
9437/** @todo Check the effective address size crap! */
9438 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9439 {
9440 uint16_t u16EffAddr;
9441
9442 /* Handle the disp16 form with no registers first. */
9443 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9444 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9445 else
9446 {
9447 /* Get the displacement. */
9448 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9449 {
9450 case 0: u16EffAddr = 0; break;
9451 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9452 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9453 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9454 }
9455
9456 /* Add the base and index registers to the disp. */
9457 switch (bRm & X86_MODRM_RM_MASK)
9458 {
9459 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9460 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9461 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9462 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9463 case 4: u16EffAddr += pCtx->si; break;
9464 case 5: u16EffAddr += pCtx->di; break;
9465 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9466 case 7: u16EffAddr += pCtx->bx; break;
9467 }
9468 }
9469
9470 *pGCPtrEff = u16EffAddr;
9471 }
9472 else
9473 {
9474 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9475 uint32_t u32EffAddr;
9476
9477 /* Handle the disp32 form with no registers first. */
9478 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9479 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9480 else
9481 {
9482 /* Get the register (or SIB) value. */
9483 switch ((bRm & X86_MODRM_RM_MASK))
9484 {
9485 case 0: u32EffAddr = pCtx->eax; break;
9486 case 1: u32EffAddr = pCtx->ecx; break;
9487 case 2: u32EffAddr = pCtx->edx; break;
9488 case 3: u32EffAddr = pCtx->ebx; break;
9489 case 4: /* SIB */
9490 {
9491 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9492
9493 /* Get the index and scale it. */
9494 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9495 {
9496 case 0: u32EffAddr = pCtx->eax; break;
9497 case 1: u32EffAddr = pCtx->ecx; break;
9498 case 2: u32EffAddr = pCtx->edx; break;
9499 case 3: u32EffAddr = pCtx->ebx; break;
9500 case 4: u32EffAddr = 0; /*none */ break;
9501 case 5: u32EffAddr = pCtx->ebp; break;
9502 case 6: u32EffAddr = pCtx->esi; break;
9503 case 7: u32EffAddr = pCtx->edi; break;
9504 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9505 }
9506 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9507
9508 /* add base */
9509 switch (bSib & X86_SIB_BASE_MASK)
9510 {
9511 case 0: u32EffAddr += pCtx->eax; break;
9512 case 1: u32EffAddr += pCtx->ecx; break;
9513 case 2: u32EffAddr += pCtx->edx; break;
9514 case 3: u32EffAddr += pCtx->ebx; break;
9515 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9516 case 5:
9517 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9518 {
9519 u32EffAddr += pCtx->ebp;
9520 SET_SS_DEF();
9521 }
9522 else
9523 {
9524 uint32_t u32Disp;
9525 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9526 u32EffAddr += u32Disp;
9527 }
9528 break;
9529 case 6: u32EffAddr += pCtx->esi; break;
9530 case 7: u32EffAddr += pCtx->edi; break;
9531 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9532 }
9533 break;
9534 }
9535 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9536 case 6: u32EffAddr = pCtx->esi; break;
9537 case 7: u32EffAddr = pCtx->edi; break;
9538 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9539 }
9540
9541 /* Get and add the displacement. */
9542 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9543 {
9544 case 0:
9545 break;
9546 case 1:
9547 {
9548 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9549 u32EffAddr += i8Disp;
9550 break;
9551 }
9552 case 2:
9553 {
9554 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9555 u32EffAddr += u32Disp;
9556 break;
9557 }
9558 default:
9559 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9560 }
9561
9562 }
9563 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9564 *pGCPtrEff = u32EffAddr;
9565 else
9566 {
9567 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9568 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9569 }
9570 }
9571 }
9572 else
9573 {
9574 uint64_t u64EffAddr;
9575
9576 /* Handle the rip+disp32 form with no registers first. */
9577 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9578 {
9579 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9580 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9581 }
9582 else
9583 {
9584 /* Get the register (or SIB) value. */
9585 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9586 {
9587 case 0: u64EffAddr = pCtx->rax; break;
9588 case 1: u64EffAddr = pCtx->rcx; break;
9589 case 2: u64EffAddr = pCtx->rdx; break;
9590 case 3: u64EffAddr = pCtx->rbx; break;
9591 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9592 case 6: u64EffAddr = pCtx->rsi; break;
9593 case 7: u64EffAddr = pCtx->rdi; break;
9594 case 8: u64EffAddr = pCtx->r8; break;
9595 case 9: u64EffAddr = pCtx->r9; break;
9596 case 10: u64EffAddr = pCtx->r10; break;
9597 case 11: u64EffAddr = pCtx->r11; break;
9598 case 13: u64EffAddr = pCtx->r13; break;
9599 case 14: u64EffAddr = pCtx->r14; break;
9600 case 15: u64EffAddr = pCtx->r15; break;
9601 /* SIB */
9602 case 4:
9603 case 12:
9604 {
9605 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9606
9607 /* Get the index and scale it. */
9608 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9609 {
9610 case 0: u64EffAddr = pCtx->rax; break;
9611 case 1: u64EffAddr = pCtx->rcx; break;
9612 case 2: u64EffAddr = pCtx->rdx; break;
9613 case 3: u64EffAddr = pCtx->rbx; break;
9614 case 4: u64EffAddr = 0; /*none */ break;
9615 case 5: u64EffAddr = pCtx->rbp; break;
9616 case 6: u64EffAddr = pCtx->rsi; break;
9617 case 7: u64EffAddr = pCtx->rdi; break;
9618 case 8: u64EffAddr = pCtx->r8; break;
9619 case 9: u64EffAddr = pCtx->r9; break;
9620 case 10: u64EffAddr = pCtx->r10; break;
9621 case 11: u64EffAddr = pCtx->r11; break;
9622 case 12: u64EffAddr = pCtx->r12; break;
9623 case 13: u64EffAddr = pCtx->r13; break;
9624 case 14: u64EffAddr = pCtx->r14; break;
9625 case 15: u64EffAddr = pCtx->r15; break;
9626 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9627 }
9628 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9629
9630 /* add base */
9631 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9632 {
9633 case 0: u64EffAddr += pCtx->rax; break;
9634 case 1: u64EffAddr += pCtx->rcx; break;
9635 case 2: u64EffAddr += pCtx->rdx; break;
9636 case 3: u64EffAddr += pCtx->rbx; break;
9637 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9638 case 6: u64EffAddr += pCtx->rsi; break;
9639 case 7: u64EffAddr += pCtx->rdi; break;
9640 case 8: u64EffAddr += pCtx->r8; break;
9641 case 9: u64EffAddr += pCtx->r9; break;
9642 case 10: u64EffAddr += pCtx->r10; break;
9643 case 11: u64EffAddr += pCtx->r11; break;
9644 case 12: u64EffAddr += pCtx->r12; break;
9645 case 14: u64EffAddr += pCtx->r14; break;
9646 case 15: u64EffAddr += pCtx->r15; break;
9647 /* complicated encodings */
9648 case 5:
9649 case 13:
9650 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9651 {
9652 if (!pIemCpu->uRexB)
9653 {
9654 u64EffAddr += pCtx->rbp;
9655 SET_SS_DEF();
9656 }
9657 else
9658 u64EffAddr += pCtx->r13;
9659 }
9660 else
9661 {
9662 uint32_t u32Disp;
9663 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9664 u64EffAddr += (int32_t)u32Disp;
9665 }
9666 break;
9667 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9668 }
9669 break;
9670 }
9671 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9672 }
9673
9674 /* Get and add the displacement. */
9675 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9676 {
9677 case 0:
9678 break;
9679 case 1:
9680 {
9681 int8_t i8Disp;
9682 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9683 u64EffAddr += i8Disp;
9684 break;
9685 }
9686 case 2:
9687 {
9688 uint32_t u32Disp;
9689 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9690 u64EffAddr += (int32_t)u32Disp;
9691 break;
9692 }
9693 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9694 }
9695
9696 }
9697
9698 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9699 *pGCPtrEff = u64EffAddr;
9700 else
9701 {
9702 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9703 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9704 }
9705 }
9706
9707 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9708 return VINF_SUCCESS;
9709}
9710
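/*
 * Worked example (annotation only, not additional IEM code): in 16-bit
 * addressing, bRm=0x46 decodes as mod=01, rm=110, i.e. [bp+disp8] with SS as
 * the default segment, so the function above reads one displacement byte,
 * sign-extends it and adds BP.  In 64-bit mode the mod=00/rm=101 form is
 * RIP-relative: the displacement is added to the address of the *next*
 * instruction, which is why cbImm (the size of any trailing immediate) must
 * be passed in.  A simplified standalone sketch of the 16-bit [bp+disp8] row:
 */
#if 0
static uint16_t iemExampleCalcBpDisp8EffAddr(int8_t i8Disp, uint16_t bp)
{
    uint16_t u16EffAddr = (uint16_t)(int16_t)i8Disp;    /* sign-extend disp8 to 16 bits */
    u16EffAddr += bp;                                   /* rm=110 adds BP and defaults to SS */
    return u16EffAddr;                                  /* wraps at 64 KiB just like the real code */
}
#endif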
9711/** @} */
9712
9713
9714
9715/*
9716 * Include the instructions
9717 */
9718#include "IEMAllInstructions.cpp.h"
9719
9720
9721
9722
9723#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9724
9725/**
9726 * Sets up execution verification mode.
9727 */
9728IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9729{
9730 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9731 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9732
9733 /*
9734 * Always note down the address of the current instruction.
9735 */
9736 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9737 pIemCpu->uOldRip = pOrgCtx->rip;
9738
9739 /*
9740 * Enable verification and/or logging.
9741 */
9742 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9743 if ( fNewNoRem
9744 && ( 0
9745#if 0 /* auto enable on first paged protected mode interrupt */
9746 || ( pOrgCtx->eflags.Bits.u1IF
9747 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9748 && TRPMHasTrap(pVCpu)
9749 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9750#endif
9751#if 0
9752 || ( pOrgCtx->cs.Sel == 0x10
9753 && ( pOrgCtx->rip == 0x90119e3e
9754 || pOrgCtx->rip == 0x901d9810))
9755#endif
9756#if 0 /* Auto enable DSL - FPU stuff. */
9757 || ( pOrgCtx->cs.Sel == 0x10
9758 && (// pOrgCtx->rip == 0xc02ec07f
9759 //|| pOrgCtx->rip == 0xc02ec082
9760 //|| pOrgCtx->rip == 0xc02ec0c9
9761 0
9762 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9763#endif
9764#if 0 /* Auto enable DSL - fstp st0 stuff. */
9765 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9766#endif
9767#if 0
9768 || pOrgCtx->rip == 0x9022bb3a
9769#endif
9770#if 0
9771 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9772#endif
9773#if 0
9774 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9775 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9776#endif
9777#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
9778 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9779 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9780 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9781#endif
9782#if 0 /* NT4SP1 - xadd early boot. */
9783 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9784#endif
9785#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9786 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9787#endif
9788#if 0 /* NT4SP1 - cmpxchg (AMD). */
9789 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9790#endif
9791#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9792 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9793#endif
9794#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9795 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9796
9797#endif
9798#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9799 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9800
9801#endif
9802#if 0 /* NT4SP1 - frstor [ecx] */
9803 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9804#endif
9805#if 0 /* xxxxxx - All long mode code. */
9806 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9807#endif
9808#if 0 /* rep movsq linux 3.7 64-bit boot. */
9809 || (pOrgCtx->rip == 0x0000000000100241)
9810#endif
9811#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9812 || (pOrgCtx->rip == 0x000000000215e240)
9813#endif
9814#if 0 /* DOS's size-overridden iret to v8086. */
9815 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9816#endif
9817 )
9818 )
9819 {
9820 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9821 RTLogFlags(NULL, "enabled");
9822 fNewNoRem = false;
9823 }
9824 if (fNewNoRem != pIemCpu->fNoRem)
9825 {
9826 pIemCpu->fNoRem = fNewNoRem;
9827 if (!fNewNoRem)
9828 {
9829 LogAlways(("Enabling verification mode!\n"));
9830 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9831 }
9832 else
9833 LogAlways(("Disabling verification mode!\n"));
9834 }
9835
9836 /*
9837 * Switch state.
9838 */
9839 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9840 {
9841 static CPUMCTX s_DebugCtx; /* Ugly! */
9842
9843 s_DebugCtx = *pOrgCtx;
9844 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9845 }
9846
9847 /*
9848 * See if there is an interrupt pending in TRPM and inject it if we can.
9849 */
9850 pIemCpu->uInjectCpl = UINT8_MAX;
9851 if ( pOrgCtx->eflags.Bits.u1IF
9852 && TRPMHasTrap(pVCpu)
9853 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9854 {
9855 uint8_t u8TrapNo;
9856 TRPMEVENT enmType;
9857 RTGCUINT uErrCode;
9858 RTGCPTR uCr2;
9859 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9860 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9861 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9862 TRPMResetTrap(pVCpu);
9863 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9864 }
9865
9866 /*
9867 * Reset the counters.
9868 */
9869 pIemCpu->cIOReads = 0;
9870 pIemCpu->cIOWrites = 0;
9871 pIemCpu->fIgnoreRaxRdx = false;
9872 pIemCpu->fOverlappingMovs = false;
9873 pIemCpu->fProblematicMemory = false;
9874 pIemCpu->fUndefinedEFlags = 0;
9875
9876 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9877 {
9878 /*
9879 * Free all verification records.
9880 */
9881 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9882 pIemCpu->pIemEvtRecHead = NULL;
9883 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9884 do
9885 {
9886 while (pEvtRec)
9887 {
9888 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9889 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9890 pIemCpu->pFreeEvtRec = pEvtRec;
9891 pEvtRec = pNext;
9892 }
9893 pEvtRec = pIemCpu->pOtherEvtRecHead;
9894 pIemCpu->pOtherEvtRecHead = NULL;
9895 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9896 } while (pEvtRec);
9897 }
9898}
9899
9900
9901/**
9902 * Allocate an event record.
9903 * @returns Pointer to a record.
9904 */
9905IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9906{
9907 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9908 return NULL;
9909
9910 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9911 if (pEvtRec)
9912 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9913 else
9914 {
9915 if (!pIemCpu->ppIemEvtRecNext)
9916 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9917
9918 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9919 if (!pEvtRec)
9920 return NULL;
9921 }
9922 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9923 pEvtRec->pNext = NULL;
9924 return pEvtRec;
9925}
9926
9927
9928/**
9929 * IOMMMIORead notification.
9930 */
9931VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9932{
9933 PVMCPU pVCpu = VMMGetCpu(pVM);
9934 if (!pVCpu)
9935 return;
9936 PIEMCPU pIemCpu = &pVCpu->iem.s;
9937 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9938 if (!pEvtRec)
9939 return;
9940 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9941 pEvtRec->u.RamRead.GCPhys = GCPhys;
9942 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9943 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9944 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9945}
9946
9947
9948/**
9949 * IOMMMIOWrite notification.
9950 */
9951VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9952{
9953 PVMCPU pVCpu = VMMGetCpu(pVM);
9954 if (!pVCpu)
9955 return;
9956 PIEMCPU pIemCpu = &pVCpu->iem.s;
9957 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9958 if (!pEvtRec)
9959 return;
9960 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9961 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9962 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9963 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9964 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9965 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9966 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9967 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9968 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9969}
9970
9971
9972/**
9973 * IOMIOPortRead notification.
9974 */
9975VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9976{
9977 PVMCPU pVCpu = VMMGetCpu(pVM);
9978 if (!pVCpu)
9979 return;
9980 PIEMCPU pIemCpu = &pVCpu->iem.s;
9981 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9982 if (!pEvtRec)
9983 return;
9984 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9985 pEvtRec->u.IOPortRead.Port = Port;
9986 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9987 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9988 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9989}
9990
9991/**
9992 * IOMIOPortWrite notification.
9993 */
9994VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9995{
9996 PVMCPU pVCpu = VMMGetCpu(pVM);
9997 if (!pVCpu)
9998 return;
9999 PIEMCPU pIemCpu = &pVCpu->iem.s;
10000 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10001 if (!pEvtRec)
10002 return;
10003 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10004 pEvtRec->u.IOPortWrite.Port = Port;
10005 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10006 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10007 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10008 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10009}
10010
10011
10012VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10013{
10014 AssertFailed();
10015}
10016
10017
10018VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10019{
10020 AssertFailed();
10021}
10022
10023
10024/**
10025 * Fakes and records an I/O port read.
10026 *
10027 * @returns VINF_SUCCESS.
10028 * @param pIemCpu The IEM per CPU data.
10029 * @param Port The I/O port.
10030 * @param pu32Value Where to store the fake value.
10031 * @param cbValue The size of the access.
10032 */
10033IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10034{
10035 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10036 if (pEvtRec)
10037 {
10038 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10039 pEvtRec->u.IOPortRead.Port = Port;
10040 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
10041 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10042 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10043 }
10044 pIemCpu->cIOReads++;
10045 *pu32Value = 0xcccccccc;
10046 return VINF_SUCCESS;
10047}
10048
10049
10050/**
10051 * Fakes and records an I/O port write.
10052 *
10053 * @returns VINF_SUCCESS.
10054 * @param pIemCpu The IEM per CPU data.
10055 * @param Port The I/O port.
10056 * @param u32Value The value being written.
10057 * @param cbValue The size of the access.
10058 */
10059IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10060{
10061 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10062 if (pEvtRec)
10063 {
10064 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10065 pEvtRec->u.IOPortWrite.Port = Port;
10066 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10067 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10068 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10069 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10070 }
10071 pIemCpu->cIOWrites++;
10072 return VINF_SUCCESS;
10073}
10074
10075
10076/**
10077 * Used to add extra details about a stub case.
10078 * @param pIemCpu The IEM per CPU state.
10079 */
10080IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10081{
10082 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10083 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10084 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10085 char szRegs[4096];
10086 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10087 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10088 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10089 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10090 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10091 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10092 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10093 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10094 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10095 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10096 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10097 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10098 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10099 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10100 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10101 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10102 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10103 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10104 " efer=%016VR{efer}\n"
10105 " pat=%016VR{pat}\n"
10106 " sf_mask=%016VR{sf_mask}\n"
10107 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10108 " lstar=%016VR{lstar}\n"
10109 " star=%016VR{star} cstar=%016VR{cstar}\n"
10110 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10111 );
10112
10113 char szInstr1[256];
10114 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10115 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10116 szInstr1, sizeof(szInstr1), NULL);
10117 char szInstr2[256];
10118 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10119 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10120 szInstr2, sizeof(szInstr2), NULL);
10121
10122 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10123}
10124
10125
10126/**
10127 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10128 * dump to the assertion info.
10129 *
10130 * @param pEvtRec The record to dump.
10131 */
10132IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10133{
10134 switch (pEvtRec->enmEvent)
10135 {
10136 case IEMVERIFYEVENT_IOPORT_READ:
10137 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10138 pEvtRec->u.IOPortRead.Port,
10139 pEvtRec->u.IOPortRead.cbValue);
10140 break;
10141 case IEMVERIFYEVENT_IOPORT_WRITE:
10142 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10143 pEvtRec->u.IOPortWrite.Port,
10144 pEvtRec->u.IOPortWrite.cbValue,
10145 pEvtRec->u.IOPortWrite.u32Value);
10146 break;
10147 case IEMVERIFYEVENT_RAM_READ:
10148 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10149 pEvtRec->u.RamRead.GCPhys,
10150 pEvtRec->u.RamRead.cb);
10151 break;
10152 case IEMVERIFYEVENT_RAM_WRITE:
10153 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10154 pEvtRec->u.RamWrite.GCPhys,
10155 pEvtRec->u.RamWrite.cb,
10156 (int)pEvtRec->u.RamWrite.cb,
10157 pEvtRec->u.RamWrite.ab);
10158 break;
10159 default:
10160 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10161 break;
10162 }
10163}
10164
10165
10166/**
10167 * Raises an assertion on the specified records, showing the given message with
10168 * a record dump attached.
10169 *
10170 * @param pIemCpu The IEM per CPU data.
10171 * @param pEvtRec1 The first record.
10172 * @param pEvtRec2 The second record.
10173 * @param pszMsg The message explaining why we're asserting.
10174 */
10175IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10176{
10177 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10178 iemVerifyAssertAddRecordDump(pEvtRec1);
10179 iemVerifyAssertAddRecordDump(pEvtRec2);
10180 iemVerifyAssertMsg2(pIemCpu);
10181 RTAssertPanic();
10182}
10183
10184
10185/**
10186 * Raises an assertion on the specified record, showing the given message with
10187 * a record dump attached.
10188 *
10189 * @param pIemCpu The IEM per CPU data.
10190 * @param pEvtRec The record to dump.
10191 * @param pszMsg The message explaining why we're asserting.
10192 */
10193IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10194{
10195 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10196 iemVerifyAssertAddRecordDump(pEvtRec);
10197 iemVerifyAssertMsg2(pIemCpu);
10198 RTAssertPanic();
10199}
10200
10201
10202/**
10203 * Verifies a write record.
10204 *
10205 * @param pIemCpu The IEM per CPU data.
10206 * @param pEvtRec The write record.
10207 * @param fRem Set if REM was doing the other execution. If clear
10208 * it was HM.
10209 */
10210IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10211{
10212 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10213 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10214 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10215 if ( RT_FAILURE(rc)
10216 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10217 {
10218 /* fend off ins */
10219 if ( !pIemCpu->cIOReads
10220 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10221 || ( pEvtRec->u.RamWrite.cb != 1
10222 && pEvtRec->u.RamWrite.cb != 2
10223 && pEvtRec->u.RamWrite.cb != 4) )
10224 {
10225 /* fend off ROMs and MMIO */
10226 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10227 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10228 {
10229 /* fend off fxsave */
10230 if (pEvtRec->u.RamWrite.cb != 512)
10231 {
10232 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10233 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10234 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10235 RTAssertMsg2Add("%s: %.*Rhxs\n"
10236 "iem: %.*Rhxs\n",
10237 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10238 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10239 iemVerifyAssertAddRecordDump(pEvtRec);
10240 iemVerifyAssertMsg2(pIemCpu);
10241 RTAssertPanic();
10242 }
10243 }
10244 }
10245 }
10246
10247}
10248
10249/**
10250 * Performs the post-execution verification checks.
10251 */
10252IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10253{
10254 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10255 return;
10256
10257 /*
10258 * Switch back the state.
10259 */
10260 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10261 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10262 Assert(pOrgCtx != pDebugCtx);
10263 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10264
10265 /*
10266 * Execute the instruction in REM.
10267 */
10268 bool fRem = false;
10269 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10270 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10271 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10272#ifdef IEM_VERIFICATION_MODE_FULL_HM
10273 if ( HMIsEnabled(pVM)
10274 && pIemCpu->cIOReads == 0
10275 && pIemCpu->cIOWrites == 0
10276 && !pIemCpu->fProblematicMemory)
10277 {
10278 uint64_t uStartRip = pOrgCtx->rip;
10279 unsigned iLoops = 0;
10280 do
10281 {
10282 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10283 iLoops++;
10284 } while ( rc == VINF_SUCCESS
10285 || ( rc == VINF_EM_DBG_STEPPED
10286 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10287 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10288 || ( pOrgCtx->rip != pDebugCtx->rip
10289 && pIemCpu->uInjectCpl != UINT8_MAX
10290 && iLoops < 8) );
10291 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10292 rc = VINF_SUCCESS;
10293 }
10294#endif
10295 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10296 || rc == VINF_IOM_R3_IOPORT_READ
10297 || rc == VINF_IOM_R3_IOPORT_WRITE
10298 || rc == VINF_IOM_R3_MMIO_READ
10299 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10300 || rc == VINF_IOM_R3_MMIO_WRITE
10301 || rc == VINF_CPUM_R3_MSR_READ
10302 || rc == VINF_CPUM_R3_MSR_WRITE
10303 || rc == VINF_EM_RESCHEDULE
10304 )
10305 {
10306 EMRemLock(pVM);
10307 rc = REMR3EmulateInstruction(pVM, pVCpu);
10308 AssertRC(rc);
10309 EMRemUnlock(pVM);
10310 fRem = true;
10311 }
10312
10313 /*
10314 * Compare the register states.
10315 */
10316 unsigned cDiffs = 0;
10317 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10318 {
10319 //Log(("REM and IEM ends up with different registers!\n"));
10320 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10321
10322# define CHECK_FIELD(a_Field) \
10323 do \
10324 { \
10325 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10326 { \
10327 switch (sizeof(pOrgCtx->a_Field)) \
10328 { \
10329 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10330 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10331 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10332 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10333 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10334 } \
10335 cDiffs++; \
10336 } \
10337 } while (0)
10338# define CHECK_XSTATE_FIELD(a_Field) \
10339 do \
10340 { \
10341 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10342 { \
10343 switch (sizeof(pOrgXState->a_Field)) \
10344 { \
10345 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10346 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10347 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10348 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10349 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10350 } \
10351 cDiffs++; \
10352 } \
10353 } while (0)
10354
10355# define CHECK_BIT_FIELD(a_Field) \
10356 do \
10357 { \
10358 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10359 { \
10360 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10361 cDiffs++; \
10362 } \
10363 } while (0)
10364
10365# define CHECK_SEL(a_Sel) \
10366 do \
10367 { \
10368 CHECK_FIELD(a_Sel.Sel); \
10369 CHECK_FIELD(a_Sel.Attr.u); \
10370 CHECK_FIELD(a_Sel.u64Base); \
10371 CHECK_FIELD(a_Sel.u32Limit); \
10372 CHECK_FIELD(a_Sel.fFlags); \
10373 } while (0)
10374
10375 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10376 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10377
10378#if 1 /* The recompiler doesn't update these the intel way. */
10379 if (fRem)
10380 {
10381 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10382 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10383 pOrgXState->x87.CS = pDebugXState->x87.CS;
10384 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10385 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10386 pOrgXState->x87.DS = pDebugXState->x87.DS;
10387 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10388 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10389 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10390 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10391 }
10392#endif
10393 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10394 {
10395 RTAssertMsg2Weak(" the FPU state differs\n");
10396 cDiffs++;
10397 CHECK_XSTATE_FIELD(x87.FCW);
10398 CHECK_XSTATE_FIELD(x87.FSW);
10399 CHECK_XSTATE_FIELD(x87.FTW);
10400 CHECK_XSTATE_FIELD(x87.FOP);
10401 CHECK_XSTATE_FIELD(x87.FPUIP);
10402 CHECK_XSTATE_FIELD(x87.CS);
10403 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10404 CHECK_XSTATE_FIELD(x87.FPUDP);
10405 CHECK_XSTATE_FIELD(x87.DS);
10406 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10407 CHECK_XSTATE_FIELD(x87.MXCSR);
10408 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10409 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10410 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10411 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10412 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10413 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10414 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10415 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10416 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10417 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10418 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10419 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10420 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10421 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10422 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10423 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10424 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10425 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10426 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10427 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10428 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10429 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10430 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10431 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10432 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10433 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10434 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10435 }
10436 CHECK_FIELD(rip);
10437 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10438 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10439 {
10440 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10441 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10442 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10443 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10444 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10445 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10446 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10447 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10448 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10449 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10450 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10451 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10452 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10453 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10454 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10455 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10456 if (0 && !fRem) /** @todo debug the occasional clearing of the RF flag when running against VT-x. */
10457 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10458 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10459 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10460 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10461 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10462 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10463 }
10464
10465 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10466 CHECK_FIELD(rax);
10467 CHECK_FIELD(rcx);
10468 if (!pIemCpu->fIgnoreRaxRdx)
10469 CHECK_FIELD(rdx);
10470 CHECK_FIELD(rbx);
10471 CHECK_FIELD(rsp);
10472 CHECK_FIELD(rbp);
10473 CHECK_FIELD(rsi);
10474 CHECK_FIELD(rdi);
10475 CHECK_FIELD(r8);
10476 CHECK_FIELD(r9);
10477 CHECK_FIELD(r10);
10478 CHECK_FIELD(r11);
10479 CHECK_FIELD(r12);
10480 CHECK_FIELD(r13);
10481 CHECK_SEL(cs);
10482 CHECK_SEL(ss);
10483 CHECK_SEL(ds);
10484 CHECK_SEL(es);
10485 CHECK_SEL(fs);
10486 CHECK_SEL(gs);
10487 CHECK_FIELD(cr0);
10488
10489 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10490 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10491 /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
10492 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10493 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10494 {
10495 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10496 { /* ignore */ }
10497 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10498 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10499 && fRem)
10500 { /* ignore */ }
10501 else
10502 CHECK_FIELD(cr2);
10503 }
10504 CHECK_FIELD(cr3);
10505 CHECK_FIELD(cr4);
10506 CHECK_FIELD(dr[0]);
10507 CHECK_FIELD(dr[1]);
10508 CHECK_FIELD(dr[2]);
10509 CHECK_FIELD(dr[3]);
10510 CHECK_FIELD(dr[6]);
10511 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10512 CHECK_FIELD(dr[7]);
10513 CHECK_FIELD(gdtr.cbGdt);
10514 CHECK_FIELD(gdtr.pGdt);
10515 CHECK_FIELD(idtr.cbIdt);
10516 CHECK_FIELD(idtr.pIdt);
10517 CHECK_SEL(ldtr);
10518 CHECK_SEL(tr);
10519 CHECK_FIELD(SysEnter.cs);
10520 CHECK_FIELD(SysEnter.eip);
10521 CHECK_FIELD(SysEnter.esp);
10522 CHECK_FIELD(msrEFER);
10523 CHECK_FIELD(msrSTAR);
10524 CHECK_FIELD(msrPAT);
10525 CHECK_FIELD(msrLSTAR);
10526 CHECK_FIELD(msrCSTAR);
10527 CHECK_FIELD(msrSFMASK);
10528 CHECK_FIELD(msrKERNELGSBASE);
10529
10530 if (cDiffs != 0)
10531 {
10532 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10533 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10534 iemVerifyAssertMsg2(pIemCpu);
10535 RTAssertPanic();
10536 }
10537# undef CHECK_FIELD
10538# undef CHECK_BIT_FIELD
10539 }
10540
10541 /*
10542 * If the register state compared fine, check the verification event
10543 * records.
10544 */
10545 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10546 {
10547 /*
10548 * Compare verification event records.
10549 * - I/O port accesses should be a 1:1 match.
10550 */
10551 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10552 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10553 while (pIemRec && pOtherRec)
10554 {
10555 /* Since we might miss RAM writes and reads, ignore reads and check
10556 that any extra written memory matches what is actually in guest RAM. */
10557 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10558 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10559 && pIemRec->pNext)
10560 {
10561 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10562 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10563 pIemRec = pIemRec->pNext;
10564 }
10565
10566 /* Do the compare. */
10567 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10568 {
10569 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10570 break;
10571 }
10572 bool fEquals;
10573 switch (pIemRec->enmEvent)
10574 {
10575 case IEMVERIFYEVENT_IOPORT_READ:
10576 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10577 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10578 break;
10579 case IEMVERIFYEVENT_IOPORT_WRITE:
10580 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10581 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10582 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10583 break;
10584 case IEMVERIFYEVENT_RAM_READ:
10585 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10586 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10587 break;
10588 case IEMVERIFYEVENT_RAM_WRITE:
10589 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10590 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10591 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10592 break;
10593 default:
10594 fEquals = false;
10595 break;
10596 }
10597 if (!fEquals)
10598 {
10599 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10600 break;
10601 }
10602
10603 /* advance */
10604 pIemRec = pIemRec->pNext;
10605 pOtherRec = pOtherRec->pNext;
10606 }
10607
10608 /* Ignore extra writes and reads. */
10609 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10610 {
10611 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10612 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10613 pIemRec = pIemRec->pNext;
10614 }
10615 if (pIemRec != NULL)
10616 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10617 else if (pOtherRec != NULL)
10618 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10619 }
10620 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10621}
10622
10623#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10624
10625/* stubs */
10626IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10627{
10628 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10629 return VERR_INTERNAL_ERROR;
10630}
10631
10632IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10633{
10634 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10635 return VERR_INTERNAL_ERROR;
10636}
10637
10638#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10639
10640
10641#ifdef LOG_ENABLED
10642/**
10643 * Logs the current instruction.
10644 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10645 * @param pCtx The current CPU context.
10646 * @param fSameCtx Set if we have the same context information as the VMM,
10647 * clear if we may have already executed an instruction in
10648 * our debug context. When clear, we assume IEMCPU holds
10649 * valid CPU mode info.
10650 */
10651IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10652{
10653# ifdef IN_RING3
10654 if (LogIs2Enabled())
10655 {
10656 char szInstr[256];
10657 uint32_t cbInstr = 0;
10658 if (fSameCtx)
10659 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10660 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10661 szInstr, sizeof(szInstr), &cbInstr);
10662 else
10663 {
10664 uint32_t fFlags = 0;
10665 switch (pVCpu->iem.s.enmCpuMode)
10666 {
10667 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10668 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10669 case IEMMODE_16BIT:
10670 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10671 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10672 else
10673 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10674 break;
10675 }
10676 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10677 szInstr, sizeof(szInstr), &cbInstr);
10678 }
10679
10680 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10681 Log2(("****\n"
10682 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10683 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10684 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10685 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10686 " %s\n"
10687 ,
10688 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10689 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10690 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10691 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10692 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10693 szInstr));
10694
10695 if (LogIs3Enabled())
10696 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10697 }
10698 else
10699# endif
10700 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10701 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10702}
10703#endif
10704
10705
10706/**
10707 * Makes status code adjustments (passing up status from I/O and access handlers)
10708 * as well as maintaining statistics.
10709 *
10710 * @returns Strict VBox status code to pass up.
10711 * @param pIemCpu The IEM per CPU data.
10712 * @param rcStrict The status from executing an instruction.
10713 */
10714DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10715{
10716 if (rcStrict != VINF_SUCCESS)
10717 {
10718 if (RT_SUCCESS(rcStrict))
10719 {
10720 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10721 || rcStrict == VINF_IOM_R3_IOPORT_READ
10722 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10723 || rcStrict == VINF_IOM_R3_MMIO_READ
10724 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10725 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10726 || rcStrict == VINF_CPUM_R3_MSR_READ
10727 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10728 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10729 || rcStrict == VINF_EM_RAW_TO_R3
10730 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10731 /* raw-mode / virt handlers only: */
10732 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10733 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10734 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10735 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10736 || rcStrict == VINF_SELM_SYNC_GDT
10737 || rcStrict == VINF_CSAM_PENDING_ACTION
10738 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10739 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10740/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10741 int32_t const rcPassUp = pIemCpu->rcPassUp;
10742 if (rcPassUp == VINF_SUCCESS)
10743 pIemCpu->cRetInfStatuses++;
10744 else if ( rcPassUp < VINF_EM_FIRST
10745 || rcPassUp > VINF_EM_LAST
10746 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10747 {
10748 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10749 pIemCpu->cRetPassUpStatus++;
10750 rcStrict = rcPassUp;
10751 }
10752 else
10753 {
10754 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10755 pIemCpu->cRetInfStatuses++;
10756 }
10757 }
10758 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10759 pIemCpu->cRetAspectNotImplemented++;
10760 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10761 pIemCpu->cRetInstrNotImplemented++;
10762#ifdef IEM_VERIFICATION_MODE_FULL
10763 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10764 rcStrict = VINF_SUCCESS;
10765#endif
10766 else
10767 pIemCpu->cRetErrStatuses++;
10768 }
10769 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10770 {
10771 pIemCpu->cRetPassUpStatus++;
10772 rcStrict = pIemCpu->rcPassUp;
10773 }
10774
10775 return rcStrict;
10776}
10777
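#if 0 /* Illustrative sketch only, not part of this file: the shape shared by the
         IEMExecDecoded* interfaces further down - set up execution state, run a
         single C-impl worker, then route the status (and any recorded pass-up
         code) through iemExecStatusCodeFiddling() before returning it. */
static VBOXSTRICTRC iemExampleDecodedWrapper(PVMCPU pVCpu)
{
    PIEMCPU pIemCpu = &pVCpu->iem.s;
    iemInitExec(pIemCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts); /* any worker would do */
    return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
}
#endif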
10778
10779/**
10780 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10781 * IEMExecOneWithPrefetchedByPC.
10782 *
10783 * @return Strict VBox status code.
10784 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10785 * @param pIemCpu The IEM per CPU data.
10786 * @param fExecuteInhibit If set, execute the instruction following CLI,
10787 * POP SS and MOV SS,GR.
10788 */
10789DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10790{
10791 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10792 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10793 if (rcStrict == VINF_SUCCESS)
10794 pIemCpu->cInstructions++;
10795 if (pIemCpu->cActiveMappings > 0)
10796 iemMemRollback(pIemCpu);
10797//#ifdef DEBUG
10798// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10799//#endif
10800
10801 /* Execute the next instruction as well if a cli, pop ss or
10802 mov ss, Gr has just completed successfully. */
10803 if ( fExecuteInhibit
10804 && rcStrict == VINF_SUCCESS
10805 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10806 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10807 {
10808 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10809 if (rcStrict == VINF_SUCCESS)
10810 {
10811# ifdef LOG_ENABLED
10812 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10813# endif
10814 IEM_OPCODE_GET_NEXT_U8(&b);
10815 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10816 if (rcStrict == VINF_SUCCESS)
10817 pIemCpu->cInstructions++;
10818 if (pIemCpu->cActiveMappings > 0)
10819 iemMemRollback(pIemCpu);
10820 }
10821 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10822 }
10823
10824 /*
10825 * Return value fiddling, statistics and sanity assertions.
10826 */
10827 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10828
10829 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10830 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10831#if defined(IEM_VERIFICATION_MODE_FULL)
10832 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10833 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10834 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10835 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10836#endif
10837 return rcStrict;
10838}
10839
10840
10841#ifdef IN_RC
10842/**
10843 * Re-enters raw-mode or ensures we return to ring-3.
10844 *
10845 * @returns rcStrict, maybe modified.
10846 * @param pIemCpu The IEM CPU structure.
10847 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10848 * @param pCtx The current CPU context.
10849 * @param rcStrict The status code returned by the interpreter.
10850 */
10851DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10852{
10853 if (!pIemCpu->fInPatchCode)
10854 CPUMRawEnter(pVCpu);
10855 return rcStrict;
10856}
10857#endif
10858
10859
10860/**
10861 * Execute one instruction.
10862 *
10863 * @return Strict VBox status code.
10864 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10865 */
10866VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10867{
10868 PIEMCPU pIemCpu = &pVCpu->iem.s;
10869
10870#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10871 iemExecVerificationModeSetup(pIemCpu);
10872#endif
10873#ifdef LOG_ENABLED
10874 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10875 iemLogCurInstr(pVCpu, pCtx, true);
10876#endif
10877
10878 /*
10879 * Do the decoding and emulation.
10880 */
10881 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10882 if (rcStrict == VINF_SUCCESS)
10883 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10884
10885#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10886 /*
10887 * Assert some sanity.
10888 */
10889 iemExecVerificationModeCheck(pIemCpu);
10890#endif
10891#ifdef IN_RC
10892 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10893#endif
10894 if (rcStrict != VINF_SUCCESS)
10895 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10896 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10897 return rcStrict;
10898}
10899
10900
10901VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10902{
10903 PIEMCPU pIemCpu = &pVCpu->iem.s;
10904 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10905 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10906
10907 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10908 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10909 if (rcStrict == VINF_SUCCESS)
10910 {
10911 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10912 if (pcbWritten)
10913 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10914 }
10915
10916#ifdef IN_RC
10917 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10918#endif
10919 return rcStrict;
10920}
10921
10922
10923VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10924 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10925{
10926 PIEMCPU pIemCpu = &pVCpu->iem.s;
10927 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10928 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10929
10930 VBOXSTRICTRC rcStrict;
10931 if ( cbOpcodeBytes
10932 && pCtx->rip == OpcodeBytesPC)
10933 {
10934 iemInitDecoder(pIemCpu, false);
10935 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10936 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10937 rcStrict = VINF_SUCCESS;
10938 }
10939 else
10940 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10941 if (rcStrict == VINF_SUCCESS)
10942 {
10943 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10944 }
10945
10946#ifdef IN_RC
10947 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10948#endif
10949 return rcStrict;
10950}
10951
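#if 0 /* Illustrative sketch only: feeding already-fetched opcode bytes to
         IEMExecOneWithPrefetchedByPC.  The hand-assembled "xor eax, eax"
         (0x31 0xc0) stands in for whatever the caller read at the guest RIP;
         emExampleExecPrefetched is a made-up name. */
static VBOXSTRICTRC emExampleExecPrefetched(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t GCPtrRip)
{
    static const uint8_t s_abXorEaxEax[] = { 0x31, 0xc0 };
    /* If GCPtrRip matches the current guest RIP, the decoder uses these bytes
       instead of prefetching; otherwise it falls back to the normal prefetch. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, GCPtrRip, s_abXorEaxEax, sizeof(s_abXorEaxEax));
}
#endif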
10952
10953VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10954{
10955 PIEMCPU pIemCpu = &pVCpu->iem.s;
10956 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10957 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10958
10959 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10960 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10961 if (rcStrict == VINF_SUCCESS)
10962 {
10963 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10964 if (pcbWritten)
10965 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10966 }
10967
10968#ifdef IN_RC
10969 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10970#endif
10971 return rcStrict;
10972}
10973
10974
10975VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10976 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10977{
10978 PIEMCPU pIemCpu = &pVCpu->iem.s;
10979 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10980 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10981
10982 VBOXSTRICTRC rcStrict;
10983 if ( cbOpcodeBytes
10984 && pCtx->rip == OpcodeBytesPC)
10985 {
10986 iemInitDecoder(pIemCpu, true);
10987 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10988 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10989 rcStrict = VINF_SUCCESS;
10990 }
10991 else
10992 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10993 if (rcStrict == VINF_SUCCESS)
10994 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10995
10996#ifdef IN_RC
10997 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10998#endif
10999 return rcStrict;
11000}
11001
11002
11003VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11004{
11005 PIEMCPU pIemCpu = &pVCpu->iem.s;
11006
11007 /*
11008 * See if there is an interrupt pending in TRPM and inject it if we can.
11009 */
11010#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11011 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11012# ifdef IEM_VERIFICATION_MODE_FULL
11013 pIemCpu->uInjectCpl = UINT8_MAX;
11014# endif
11015 if ( pCtx->eflags.Bits.u1IF
11016 && TRPMHasTrap(pVCpu)
11017 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11018 {
11019 uint8_t u8TrapNo;
11020 TRPMEVENT enmType;
11021 RTGCUINT uErrCode;
11022 RTGCPTR uCr2;
11023 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11024 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11025 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11026 TRPMResetTrap(pVCpu);
11027 }
11028#else
11029 iemExecVerificationModeSetup(pIemCpu);
11030 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11031#endif
11032
11033 /*
11034 * Log the state.
11035 */
11036#ifdef LOG_ENABLED
11037 iemLogCurInstr(pVCpu, pCtx, true);
11038#endif
11039
11040 /*
11041 * Do the decoding and emulation.
11042 */
11043 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11044 if (rcStrict == VINF_SUCCESS)
11045 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11046
11047#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11048 /*
11049 * Assert some sanity.
11050 */
11051 iemExecVerificationModeCheck(pIemCpu);
11052#endif
11053
11054 /*
11055 * Maybe re-enter raw-mode and log.
11056 */
11057#ifdef IN_RC
11058 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11059#endif
11060 if (rcStrict != VINF_SUCCESS)
11061 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11062 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11063 return rcStrict;
11064}
11065
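#if 0 /* Illustrative sketch only: a minimal EM-style loop over IEMExecLots,
         stopping at the first status that needs handling elsewhere.
         emExampleIemLoop and cMaxInstructions are made up for the example. */
static VBOXSTRICTRC emExampleIemLoop(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (uint32_t i = 0; i < cMaxInstructions && rcStrict == VINF_SUCCESS; i++)
        rcStrict = IEMExecLots(pVCpu);
    return rcStrict;
}
#endif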
11066
11067
11068/**
11069 * Injects a trap, fault, abort, software interrupt or external interrupt.
11070 *
11071 * The parameter list matches TRPMQueryTrapAll pretty closely.
11072 *
11073 * @returns Strict VBox status code.
11074 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11075 * @param u8TrapNo The trap number.
11076 * @param enmType What type is it (trap/fault/abort), software
11077 * interrupt or hardware interrupt.
11078 * @param uErrCode The error code if applicable.
11079 * @param uCr2 The CR2 value if applicable.
11080 * @param cbInstr The instruction length (only relevant for
11081 * software interrupts).
11082 */
11083VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11084 uint8_t cbInstr)
11085{
11086 iemInitDecoder(&pVCpu->iem.s, false);
11087#ifdef DBGFTRACE_ENABLED
11088 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11089 u8TrapNo, enmType, uErrCode, uCr2);
11090#endif
11091
11092 uint32_t fFlags;
11093 switch (enmType)
11094 {
11095 case TRPM_HARDWARE_INT:
11096 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11097 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11098 uErrCode = uCr2 = 0;
11099 break;
11100
11101 case TRPM_SOFTWARE_INT:
11102 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11103 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11104 uErrCode = uCr2 = 0;
11105 break;
11106
11107 case TRPM_TRAP:
11108 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11109 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11110 if (u8TrapNo == X86_XCPT_PF)
11111 fFlags |= IEM_XCPT_FLAGS_CR2;
11112 switch (u8TrapNo)
11113 {
11114 case X86_XCPT_DF:
11115 case X86_XCPT_TS:
11116 case X86_XCPT_NP:
11117 case X86_XCPT_SS:
11118 case X86_XCPT_PF:
11119 case X86_XCPT_AC:
11120 fFlags |= IEM_XCPT_FLAGS_ERR;
11121 break;
11122
11123 case X86_XCPT_NMI:
11124 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11125 break;
11126 }
11127 break;
11128
11129 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11130 }
11131
11132 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11133}
11134
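#if 0 /* Illustrative sketch only: injecting a page fault the way a caller might,
         using the TRPM_TRAP path above.  iemExampleInjectPageFault is a made-up
         name; X86_XCPT_PF makes IEMInjectTrap add the error code and CR2 flags. */
static VBOXSTRICTRC iemExampleInjectPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif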
11135
11136/**
11137 * Injects the active TRPM event.
11138 *
11139 * @returns Strict VBox status code.
11140 * @param pVCpu The cross context virtual CPU structure.
11141 */
11142VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11143{
11144#ifndef IEM_IMPLEMENTS_TASKSWITCH
11145 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11146#else
11147 uint8_t u8TrapNo;
11148 TRPMEVENT enmType;
11149 RTGCUINT uErrCode;
11150 RTGCUINTPTR uCr2;
11151 uint8_t cbInstr;
11152 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11153 if (RT_FAILURE(rc))
11154 return rc;
11155
11156 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11157
11158 /** @todo Are there any other codes that imply the event was successfully
11159 * delivered to the guest? See @bugref{6607}. */
11160 if ( rcStrict == VINF_SUCCESS
11161 || rcStrict == VINF_IEM_RAISED_XCPT)
11162 {
11163 TRPMResetTrap(pVCpu);
11164 }
11165 return rcStrict;
11166#endif
11167}
11168
11169
11170VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11171{
11172 return VERR_NOT_IMPLEMENTED;
11173}
11174
11175
11176VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11177{
11178 return VERR_NOT_IMPLEMENTED;
11179}
11180
11181
11182#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11183/**
11184 * Executes an IRET instruction with default operand size.
11185 *
11186 * This is for PATM.
11187 *
11188 * @returns VBox status code.
11189 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11190 * @param pCtxCore The register frame.
11191 */
11192VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11193{
11194 PIEMCPU pIemCpu = &pVCpu->iem.s;
11195 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11196
11197 iemCtxCoreToCtx(pCtx, pCtxCore);
11198 iemInitDecoder(pIemCpu);
11199 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11200 if (rcStrict == VINF_SUCCESS)
11201 iemCtxToCtxCore(pCtxCore, pCtx);
11202 else
11203 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11204 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11205 return rcStrict;
11206}
11207#endif
11208
11209
11210/**
11211 * Macro used by the IEMExec* methods to check the given instruction length.
11212 *
11213 * Will return on failure!
11214 *
11215 * @param a_cbInstr The given instruction length.
11216 * @param a_cbMin The minimum length.
11217 */
11218#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11219 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11220 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
11221
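/*
 * Worked example (illustrative only) of the single-comparison range check above,
 * for a_cbMin = 2 as used by the CRx/CLTS interfaces below:
 *      a_cbInstr = 1   ->  (unsigned)(1 - 2)  = 0xffffffff  > 13  -> assertion fails (shorter than minimum)
 *      a_cbInstr = 2   ->  (unsigned)(2 - 2)  = 0          <= 13  -> ok
 *      a_cbInstr = 15  ->  (unsigned)(15 - 2) = 13         <= 13  -> ok
 *      a_cbInstr = 16  ->  (unsigned)(16 - 2) = 14          > 13  -> assertion fails (x86 maximum is 15 bytes)
 * The unsigned subtraction wraps for too-short lengths, folding the lower and
 * upper bound tests into one comparison.
 */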
11222
11223/**
11224 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11225 *
11226 * This API ASSUMES that the caller has already verified that the guest code is
11227 * allowed to access the I/O port. (The I/O port is in the DX register in the
11228 * guest state.)
11229 *
11230 * @returns Strict VBox status code.
11231 * @param pVCpu The cross context virtual CPU structure.
11232 * @param cbValue The size of the I/O port access (1, 2, or 4).
11233 * @param enmAddrMode The addressing mode.
11234 * @param fRepPrefix Indicates whether a repeat prefix is used
11235 * (doesn't matter which for this instruction).
11236 * @param cbInstr The instruction length in bytes.
11237 * @param iEffSeg The effective segment register.
11238 */
11239VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11240 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11241{
11242 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11243 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11244
11245 /*
11246 * State init.
11247 */
11248 PIEMCPU pIemCpu = &pVCpu->iem.s;
11249 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11250
11251 /*
11252 * Switch orgy for getting to the right handler.
11253 */
11254 VBOXSTRICTRC rcStrict;
11255 if (fRepPrefix)
11256 {
11257 switch (enmAddrMode)
11258 {
11259 case IEMMODE_16BIT:
11260 switch (cbValue)
11261 {
11262 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11263 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11264 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11265 default:
11266 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11267 }
11268 break;
11269
11270 case IEMMODE_32BIT:
11271 switch (cbValue)
11272 {
11273 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11274 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11275 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11276 default:
11277 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11278 }
11279 break;
11280
11281 case IEMMODE_64BIT:
11282 switch (cbValue)
11283 {
11284 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11285 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11286 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11287 default:
11288 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11289 }
11290 break;
11291
11292 default:
11293 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11294 }
11295 }
11296 else
11297 {
11298 switch (enmAddrMode)
11299 {
11300 case IEMMODE_16BIT:
11301 switch (cbValue)
11302 {
11303 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11304 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11305 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11306 default:
11307 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11308 }
11309 break;
11310
11311 case IEMMODE_32BIT:
11312 switch (cbValue)
11313 {
11314 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11315 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11316 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11317 default:
11318 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11319 }
11320 break;
11321
11322 case IEMMODE_64BIT:
11323 switch (cbValue)
11324 {
11325 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11326 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11327 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11328 default:
11329 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11330 }
11331 break;
11332
11333 default:
11334 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11335 }
11336 }
11337
11338 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11339}
11340
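#if 0 /* Illustrative sketch only: how an HM/EM exit handler might hand a decoded,
         REP-prefixed OUTSB to IEM.  hmExampleHandleOutsExit is a made-up name;
         the operand size, address mode, segment and instruction length would
         normally come from the exit qualification / decoded instruction. */
static VBOXSTRICTRC hmExampleHandleOutsExit(PVMCPU pVCpu)
{
    /* "rep outsb" with a 32-bit address size and DS as the effective segment;
       0xf3 0x6e is 2 bytes long. */
    return IEMExecStringIoWrite(pVCpu,
                                1             /* cbValue */,
                                IEMMODE_32BIT /* enmAddrMode */,
                                true          /* fRepPrefix */,
                                2             /* cbInstr */,
                                X86_SREG_DS   /* iEffSeg */);
}
#endif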
11341
11342/**
11343 * Interface for HM and EM for executing string I/O IN (read) instructions.
11344 *
11345 * This API ASSUMES that the caller has already verified that the guest code is
11346 * allowed to access the I/O port. (The I/O port is in the DX register in the
11347 * guest state.)
11348 *
11349 * @returns Strict VBox status code.
11350 * @param pVCpu The cross context virtual CPU structure.
11351 * @param cbValue The size of the I/O port access (1, 2, or 4).
11352 * @param enmAddrMode The addressing mode.
11353 * @param fRepPrefix Indicates whether a repeat prefix is used
11354 * (doesn't matter which for this instruction).
11355 * @param cbInstr The instruction length in bytes.
11356 */
11357VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11358 bool fRepPrefix, uint8_t cbInstr)
11359{
11360 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11361
11362 /*
11363 * State init.
11364 */
11365 PIEMCPU pIemCpu = &pVCpu->iem.s;
11366 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11367
11368 /*
11369 * Switch orgy for getting to the right handler.
11370 */
11371 VBOXSTRICTRC rcStrict;
11372 if (fRepPrefix)
11373 {
11374 switch (enmAddrMode)
11375 {
11376 case IEMMODE_16BIT:
11377 switch (cbValue)
11378 {
11379 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11380 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11381 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11382 default:
11383 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11384 }
11385 break;
11386
11387 case IEMMODE_32BIT:
11388 switch (cbValue)
11389 {
11390 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11391 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11392 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11393 default:
11394 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11395 }
11396 break;
11397
11398 case IEMMODE_64BIT:
11399 switch (cbValue)
11400 {
11401 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11402 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11403 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11404 default:
11405 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11406 }
11407 break;
11408
11409 default:
11410 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11411 }
11412 }
11413 else
11414 {
11415 switch (enmAddrMode)
11416 {
11417 case IEMMODE_16BIT:
11418 switch (cbValue)
11419 {
11420 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11421 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11422 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11423 default:
11424 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11425 }
11426 break;
11427
11428 case IEMMODE_32BIT:
11429 switch (cbValue)
11430 {
11431 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11432 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11433 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11434 default:
11435 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11436 }
11437 break;
11438
11439 case IEMMODE_64BIT:
11440 switch (cbValue)
11441 {
11442 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11443 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11444 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11445 default:
11446 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11447 }
11448 break;
11449
11450 default:
11451 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11452 }
11453 }
11454
11455 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11456}
11457
11458
11459
11460/**
11461 * Interface for HM and EM to write to a CRx register.
11462 *
11463 * @returns Strict VBox status code.
11464 * @param pVCpu The cross context virtual CPU structure.
11465 * @param cbInstr The instruction length in bytes.
11466 * @param iCrReg The control register number (destination).
11467 * @param iGReg The general purpose register number (source).
11468 *
11469 * @remarks In ring-0 not all of the state needs to be synced in.
11470 */
11471VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11472{
11473 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11474 Assert(iCrReg < 16);
11475 Assert(iGReg < 16);
11476
11477 PIEMCPU pIemCpu = &pVCpu->iem.s;
11478 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11479 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11480 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11481}
11482
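#if 0 /* Illustrative sketch only: forwarding a decoded "mov cr3, rax"
         (0x0f 0x22 0xd8, 3 bytes) from an HM exit handler.  hmExampleMovToCr3
         is a made-up name. */
static VBOXSTRICTRC hmExampleMovToCr3(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg = CR3*/, X86_GREG_xAX /*iGReg*/);
}
#endif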
11483
11484/**
11485 * Interface for HM and EM to read from a CRx register.
11486 *
11487 * @returns Strict VBox status code.
11488 * @param pVCpu The cross context virtual CPU structure.
11489 * @param cbInstr The instruction length in bytes.
11490 * @param iGReg The general purpose register number (destination).
11491 * @param iCrReg The control register number (source).
11492 *
11493 * @remarks In ring-0 not all of the state needs to be synced in.
11494 */
11495VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11496{
11497 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11498 Assert(iCrReg < 16);
11499 Assert(iGReg < 16);
11500
11501 PIEMCPU pIemCpu = &pVCpu->iem.s;
11502 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11503 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11504 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11505}
11506
11507
11508/**
11509 * Interface for HM and EM to clear the CR0[TS] bit.
11510 *
11511 * @returns Strict VBox status code.
11512 * @param pVCpu The cross context virtual CPU structure.
11513 * @param cbInstr The instruction length in bytes.
11514 *
11515 * @remarks In ring-0 not all of the state needs to be synced in.
11516 */
11517VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11518{
11519 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11520
11521 PIEMCPU pIemCpu = &pVCpu->iem.s;
11522 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11523 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11524 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11525}
11526
11527
11528/**
11529 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11530 *
11531 * @returns Strict VBox status code.
11532 * @param pVCpu The cross context virtual CPU structure.
11533 * @param cbInstr The instruction length in bytes.
11534 * @param uValue The value to load into CR0.
11535 *
11536 * @remarks In ring-0 not all of the state needs to be synced in.
11537 */
11538VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11539{
11540 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11541
11542 PIEMCPU pIemCpu = &pVCpu->iem.s;
11543 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11544 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11545 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11546}
11547
11548
11549/**
11550 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11551 *
11552 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11553 *
11554 * @returns Strict VBox status code.
11555 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11556 * @param cbInstr The instruction length in bytes.
11557 * @remarks In ring-0 not all of the state needs to be synced in.
11558 * @thread EMT(pVCpu)
11559 */
11560VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11561{
11562 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11563
11564 PIEMCPU pIemCpu = &pVCpu->iem.s;
11565 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11566 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11567 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11568}
11569
11570#ifdef IN_RING3
11571
11572/**
11573 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11574 *
11575 * @returns Merge between @a rcStrict and what the commit operation returned.
11576 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11577 * @param rcStrict The status code returned by ring-0 or raw-mode.
11578 */
11579VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11580{
11581 PIEMCPU pIemCpu = &pVCpu->iem.s;
11582
11583 /*
11584 * Retrieve and reset the pending commit.
11585 */
11586 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11587 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11588 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11589
11590 /*
11591 * Must reset pass-up status code.
11592 */
11593 pIemCpu->rcPassUp = VINF_SUCCESS;
11594
11595 /*
11596 * Call the function. Currently using switch here instead of function
11597 * pointer table as a switch won't get skewed.
11598 */
11599 VBOXSTRICTRC rcStrictCommit;
11600 switch (enmFn)
11601 {
11602 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11603 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11604 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11605 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11606 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11607 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11608 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11609 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11610 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11611 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11612 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11613 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11614 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11615 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11616 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11617 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11618 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11619 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11620 default:
11621 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11622 }
11623
11624 /*
11625 * Merge status code (if any) with the incoming one.
11626 */
11627 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11628 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11629 return rcStrict;
11630 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11631 return rcStrictCommit;
11632
11633 /* Complicated. */
11634 if (RT_FAILURE(rcStrict))
11635 return rcStrict;
11636 if (RT_FAILURE(rcStrictCommit))
11637 return rcStrictCommit;
11638 if ( rcStrict >= VINF_EM_FIRST
11639 && rcStrict <= VINF_EM_LAST)
11640 {
11641 if ( rcStrictCommit >= VINF_EM_FIRST
11642 && rcStrictCommit <= VINF_EM_LAST)
11643 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11644
11645 /* This really shouldn't happen. Check PGM + handler code! */
11646 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11647 }
11648 /* This shouldn't really happen either, see IOM_SUCCESS. */
11649 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11650}
11651
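#if 0 /* Illustrative sketch only: the ring-3 call site shape for the commit
         handler above - only invoked when VMCPU_FF_IEM is pending, and the
         returned status is the merge of the incoming one and the commit result.
         emExampleDoPendingIemCommit is a made-up name. */
static VBOXSTRICTRC emExampleDoPendingIemCommit(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3DoPendingAction(pVCpu, rcStrict);
    return rcStrict;
}
#endif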
11652#endif /* IN_RING3 */
11653