VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 61683

Last change on this file since 61683 was 61657, checked in by vboxsync, 9 years ago

VMM: Careful with cpumguest and friends!

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 466.2 KB
1/* $Id: IEMAll.cpp 61657 2016-06-10 13:15:41Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the "IEM" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85
86/*********************************************************************************************************************************
87* Header Files *
88*********************************************************************************************************************************/
89#define LOG_GROUP LOG_GROUP_IEM
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/pdm.h>
93#include <VBox/vmm/pgm.h>
94#include <internal/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/tm.h>
99#include <VBox/vmm/dbgf.h>
100#include <VBox/vmm/dbgftrace.h>
101#ifdef VBOX_WITH_RAW_MODE_NOT_R0
102# include <VBox/vmm/patm.h>
103# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
104# include <VBox/vmm/csam.h>
105# endif
106#endif
107#include "IEMInternal.h"
108#ifdef IEM_VERIFICATION_MODE_FULL
109# include <VBox/vmm/rem.h>
110# include <VBox/vmm/mm.h>
111#endif
112#include <VBox/vmm/vm.h>
113#include <VBox/log.h>
114#include <VBox/err.h>
115#include <VBox/param.h>
116#include <VBox/dis.h>
117#include <VBox/disopcode.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123
124/*********************************************************************************************************************************
125* Structures and Typedefs *
126*********************************************************************************************************************************/
127/** @typedef PFNIEMOP
128 * Pointer to an opcode decoder function.
129 */
130
131/** @def FNIEMOP_DEF
132 * Define an opcode decoder function.
133 *
134 * We're using macros for this so that adding and removing parameters as well as
135 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
136 *
137 * @param a_Name The function name.
138 */
139
140
141#if defined(__GNUC__) && defined(RT_ARCH_X86)
142typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
143# define FNIEMOP_DEF(a_Name) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
145# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
147# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
148 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
149
150#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
151typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
152# define FNIEMOP_DEF(a_Name) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
156# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
157 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
158
159#elif defined(__GNUC__)
160typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#else
169typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
170# define FNIEMOP_DEF(a_Name) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
174# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
175 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
176
177#endif
178
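/*
 * Illustrative sketch only (hypothetical handlers, not wired into any table):
 * FNIEMOP_DEF and FNIEMOP_DEF_1 expand to a function header with the calling
 * convention and no-throw attributes appropriate for the compiler at hand, so
 * decoder functions are written as ordinary function bodies.
 */
#if 0 /* example */
FNIEMOP_DEF(iemOp_ExampleNoOperands)
{
    NOREF(pIemCpu);
    return VINF_SUCCESS;
}

FNIEMOP_DEF_1(iemOpCommonExample, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    return VINF_SUCCESS;
}
#endif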
179
180/**
181 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
182 */
183typedef union IEMSELDESC
184{
185 /** The legacy view. */
186 X86DESC Legacy;
187 /** The long mode view. */
188 X86DESC64 Long;
189} IEMSELDESC;
190/** Pointer to a selector descriptor table entry. */
191typedef IEMSELDESC *PIEMSELDESC;
192
193
194/*********************************************************************************************************************************
195* Defined Constants And Macros *
196*********************************************************************************************************************************/
197/** Temporary hack to disable the double execution. Will be removed in favor
198 * of a dedicated execution mode in EM. */
199//#define IEM_VERIFICATION_MODE_NO_REM
200
201/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
202 * due to GCC lacking knowledge about the value range of a switch. */
203#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
204
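/*
 * Illustrative sketch only (hypothetical helper): because the macro supplies
 * the 'default:' label and the return statement itself, a fully covered
 * switch keeps GCC quiet without an unreachable fall-through path.
 */
#if 0 /* example */
IEM_STATIC int iemExampleOpSizeToBytes(IEMMODE enmEffOpSize, uint8_t *pcbOp)
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT: *pcbOp = 2; return VINF_SUCCESS;
        case IEMMODE_32BIT: *pcbOp = 4; return VINF_SUCCESS;
        case IEMMODE_64BIT: *pcbOp = 8; return VINF_SUCCESS;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
#endif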
205/**
206 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
207 * occasion.
208 */
209#ifdef LOG_ENABLED
210# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
211 do { \
212 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
213 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
214 } while (0)
215#else
216# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
217 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
218#endif
219
220/**
221 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
222 * occasion using the supplied logger statement.
223 *
224 * @param a_LoggerArgs What to log on failure.
225 */
226#ifdef LOG_ENABLED
227# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
228 do { \
229 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
230 /*LogFunc(a_LoggerArgs);*/ \
231 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
232 } while (0)
233#else
234# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
235 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
236#endif
237
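/*
 * Illustrative sketch only (hypothetical call site): the _LOG variant takes a
 * complete logger argument list so the bail-out reason shows up in debug logs.
 */
#if 0 /* example */
if (fUnsupportedCombination) /* hypothetical condition */
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("uSel=%#x fAccess=%#x\n", uSel, fAccess));
#endif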
238/**
239 * Call an opcode decoder function.
240 *
241 * We're using macros for this so that adding and removing parameters can be
242 * done as we please. See FNIEMOP_DEF.
243 */
244#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
245
246/**
247 * Call a common opcode decoder function taking one extra argument.
248 *
249 * We're using macros for this so that adding and removing parameters can be
250 * done as we please. See FNIEMOP_DEF_1.
251 */
252#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
253
254/**
255 * Call a common opcode decoder function taking two extra arguments.
256 *
257 * We're using macros for this so that adding and removing parameters can be
258 * done as we please. See FNIEMOP_DEF_2.
259 */
260#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
261
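/*
 * Illustrative sketch only: this is roughly how the top level decoder hands a
 * fetched opcode byte to the one-byte dispatch table (g_apfnOneByteMap is
 * declared further down; b stands for the opcode byte just read).
 */
#if 0 /* example */
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
#endif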
262/**
263 * Check if we're currently executing in real or virtual 8086 mode.
264 *
265 * @returns @c true if it is, @c false if not.
266 * @param a_pIemCpu The IEM state of the current CPU.
267 */
268#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
269
270/**
271 * Check if we're currently executing in virtual 8086 mode.
272 *
273 * @returns @c true if it is, @c false if not.
274 * @param a_pIemCpu The IEM state of the current CPU.
275 */
276#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
277
278/**
279 * Check if we're currently executing in long mode.
280 *
281 * @returns @c true if it is, @c false if not.
282 * @param a_pIemCpu The IEM state of the current CPU.
283 */
284#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
285
286/**
287 * Check if we're currently executing in real mode.
288 *
289 * @returns @c true if it is, @c false if not.
290 * @param a_pIemCpu The IEM state of the current CPU.
291 */
292#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
293
294/**
295 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
296 * @returns PCCPUMFEATURES
297 * @param a_pIemCpu The IEM state of the current CPU.
298 */
299#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
300
301/**
302 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
303 * @returns PCCPUMFEATURES
304 * @param a_pIemCpu The IEM state of the current CPU.
305 */
306#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
307
308/**
309 * Evaluates to true if we're presenting an Intel CPU to the guest.
310 */
311#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
312
313/**
314 * Evaluates to true if we're presenting an AMD CPU to the guest.
315 */
316#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
317
318/**
319 * Check if the address is canonical.
320 */
321#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
322
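/*
 * Illustrative sketch only (hypothetical guard code): the mode and feature
 * macros above are typically used to reject an instruction up front.  The
 * iemRaiseUndefinedOpcode raiser and the fSse2 feature flag are assumptions
 * here, not declarations from this file.
 */
#if 0 /* example */
if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))            /* not valid outside protected mode */
    return iemRaiseUndefinedOpcode(pIemCpu);
if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) /* guest CPU profile lacks the feature */
    return iemRaiseUndefinedOpcode(pIemCpu);
#endif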
323
324/*********************************************************************************************************************************
325* Global Variables *
326*********************************************************************************************************************************/
327extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
328
329
330/** Function table for the ADD instruction. */
331IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
332{
333 iemAImpl_add_u8, iemAImpl_add_u8_locked,
334 iemAImpl_add_u16, iemAImpl_add_u16_locked,
335 iemAImpl_add_u32, iemAImpl_add_u32_locked,
336 iemAImpl_add_u64, iemAImpl_add_u64_locked
337};
338
339/** Function table for the ADC instruction. */
340IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
341{
342 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
343 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
344 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
345 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
346};
347
348/** Function table for the SUB instruction. */
349IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
350{
351 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
352 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
353 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
354 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
355};
356
357/** Function table for the SBB instruction. */
358IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
359{
360 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
361 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
362 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
363 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
364};
365
366/** Function table for the OR instruction. */
367IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
368{
369 iemAImpl_or_u8, iemAImpl_or_u8_locked,
370 iemAImpl_or_u16, iemAImpl_or_u16_locked,
371 iemAImpl_or_u32, iemAImpl_or_u32_locked,
372 iemAImpl_or_u64, iemAImpl_or_u64_locked
373};
374
375/** Function table for the XOR instruction. */
376IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
377{
378 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
379 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
380 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
381 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
382};
383
384/** Function table for the AND instruction. */
385IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
386{
387 iemAImpl_and_u8, iemAImpl_and_u8_locked,
388 iemAImpl_and_u16, iemAImpl_and_u16_locked,
389 iemAImpl_and_u32, iemAImpl_and_u32_locked,
390 iemAImpl_and_u64, iemAImpl_and_u64_locked
391};
392
393/** Function table for the CMP instruction.
394 * @remarks Making operand order ASSUMPTIONS.
395 */
396IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
397{
398 iemAImpl_cmp_u8, NULL,
399 iemAImpl_cmp_u16, NULL,
400 iemAImpl_cmp_u32, NULL,
401 iemAImpl_cmp_u64, NULL
402};
403
404/** Function table for the TEST instruction.
405 * @remarks Making operand order ASSUMPTIONS.
406 */
407IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
408{
409 iemAImpl_test_u8, NULL,
410 iemAImpl_test_u16, NULL,
411 iemAImpl_test_u32, NULL,
412 iemAImpl_test_u64, NULL
413};
414
415/** Function table for the BT instruction. */
416IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
417{
418 NULL, NULL,
419 iemAImpl_bt_u16, NULL,
420 iemAImpl_bt_u32, NULL,
421 iemAImpl_bt_u64, NULL
422};
423
424/** Function table for the BTC instruction. */
425IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
426{
427 NULL, NULL,
428 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
429 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
430 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
431};
432
433/** Function table for the BTR instruction. */
434IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
435{
436 NULL, NULL,
437 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
438 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
439 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
440};
441
442/** Function table for the BTS instruction. */
443IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
444{
445 NULL, NULL,
446 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
447 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
448 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
449};
450
451/** Function table for the BSF instruction. */
452IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
453{
454 NULL, NULL,
455 iemAImpl_bsf_u16, NULL,
456 iemAImpl_bsf_u32, NULL,
457 iemAImpl_bsf_u64, NULL
458};
459
460/** Function table for the BSR instruction. */
461IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
462{
463 NULL, NULL,
464 iemAImpl_bsr_u16, NULL,
465 iemAImpl_bsr_u32, NULL,
466 iemAImpl_bsr_u64, NULL
467};
468
469/** Function table for the IMUL instruction. */
470IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
471{
472 NULL, NULL,
473 iemAImpl_imul_two_u16, NULL,
474 iemAImpl_imul_two_u32, NULL,
475 iemAImpl_imul_two_u64, NULL
476};
477
478/** Group 1 /r lookup table. */
479IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
480{
481 &g_iemAImpl_add,
482 &g_iemAImpl_or,
483 &g_iemAImpl_adc,
484 &g_iemAImpl_sbb,
485 &g_iemAImpl_and,
486 &g_iemAImpl_sub,
487 &g_iemAImpl_xor,
488 &g_iemAImpl_cmp
489};
490
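/*
 * Illustrative sketch only: for the 0x80-0x83 opcode forms the reg field of
 * the ModR/M byte (bits 3 through 5) picks the operation, so the table above
 * is indexed like this (bRm being a hypothetical, already fetched ModR/M byte):
 */
#if 0 /* example */
PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7]; /* ModR/M reg field */
#endif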
491/** Function table for the INC instruction. */
492IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
493{
494 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
495 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
496 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
497 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
498};
499
500/** Function table for the DEC instruction. */
501IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
502{
503 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
504 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
505 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
506 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
507};
508
509/** Function table for the NEG instruction. */
510IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
511{
512 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
513 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
514 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
515 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
516};
517
518/** Function table for the NOT instruction. */
519IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
520{
521 iemAImpl_not_u8, iemAImpl_not_u8_locked,
522 iemAImpl_not_u16, iemAImpl_not_u16_locked,
523 iemAImpl_not_u32, iemAImpl_not_u32_locked,
524 iemAImpl_not_u64, iemAImpl_not_u64_locked
525};
526
527
528/** Function table for the ROL instruction. */
529IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
530{
531 iemAImpl_rol_u8,
532 iemAImpl_rol_u16,
533 iemAImpl_rol_u32,
534 iemAImpl_rol_u64
535};
536
537/** Function table for the ROR instruction. */
538IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
539{
540 iemAImpl_ror_u8,
541 iemAImpl_ror_u16,
542 iemAImpl_ror_u32,
543 iemAImpl_ror_u64
544};
545
546/** Function table for the RCL instruction. */
547IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
548{
549 iemAImpl_rcl_u8,
550 iemAImpl_rcl_u16,
551 iemAImpl_rcl_u32,
552 iemAImpl_rcl_u64
553};
554
555/** Function table for the RCR instruction. */
556IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
557{
558 iemAImpl_rcr_u8,
559 iemAImpl_rcr_u16,
560 iemAImpl_rcr_u32,
561 iemAImpl_rcr_u64
562};
563
564/** Function table for the SHL instruction. */
565IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
566{
567 iemAImpl_shl_u8,
568 iemAImpl_shl_u16,
569 iemAImpl_shl_u32,
570 iemAImpl_shl_u64
571};
572
573/** Function table for the SHR instruction. */
574IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
575{
576 iemAImpl_shr_u8,
577 iemAImpl_shr_u16,
578 iemAImpl_shr_u32,
579 iemAImpl_shr_u64
580};
581
582/** Function table for the SAR instruction. */
583IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
584{
585 iemAImpl_sar_u8,
586 iemAImpl_sar_u16,
587 iemAImpl_sar_u32,
588 iemAImpl_sar_u64
589};
590
591
592/** Function table for the MUL instruction. */
593IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
594{
595 iemAImpl_mul_u8,
596 iemAImpl_mul_u16,
597 iemAImpl_mul_u32,
598 iemAImpl_mul_u64
599};
600
601/** Function table for the IMUL instruction working implicitly on rAX. */
602IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
603{
604 iemAImpl_imul_u8,
605 iemAImpl_imul_u16,
606 iemAImpl_imul_u32,
607 iemAImpl_imul_u64
608};
609
610/** Function table for the DIV instruction. */
611IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
612{
613 iemAImpl_div_u8,
614 iemAImpl_div_u16,
615 iemAImpl_div_u32,
616 iemAImpl_div_u64
617};
618
619/** Function table for the IDIV instruction. */
620IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
621{
622 iemAImpl_idiv_u8,
623 iemAImpl_idiv_u16,
624 iemAImpl_idiv_u32,
625 iemAImpl_idiv_u64
626};
627
628/** Function table for the SHLD instruction */
629IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
630{
631 iemAImpl_shld_u16,
632 iemAImpl_shld_u32,
633 iemAImpl_shld_u64,
634};
635
636/** Function table for the SHRD instruction */
637IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
638{
639 iemAImpl_shrd_u16,
640 iemAImpl_shrd_u32,
641 iemAImpl_shrd_u64,
642};
643
644
645/** Function table for the PUNPCKLBW instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
647/** Function table for the PUNPCKLWD instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
649/** Function table for the PUNPCKLDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
651/** Function table for the PUNPCKLQDQ instruction */
652IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
653
654/** Function table for the PUNPCKHBW instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
656/** Function table for the PUNPCKHWD instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
658/** Function table for the PUNPCKHDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
660/** Function table for the PUNPCKHQDQ instruction */
661IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
662
663/** Function table for the PXOR instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
665/** Function table for the PCMPEQB instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
667/** Function table for the PCMPEQW instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
669/** Function table for the PCMPEQD instruction */
670IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
671
672
673#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
674/** What IEM just wrote. */
675uint8_t g_abIemWrote[256];
676/** How much IEM just wrote. */
677size_t g_cbIemWrote;
678#endif
679
680
681/*********************************************************************************************************************************
682* Internal Functions *
683*********************************************************************************************************************************/
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
686IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
687IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
689IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
692IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
694IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
695IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
698IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
699IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
700IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
701IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
702IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
703IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
709IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
710IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
713IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
714IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
715IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
716IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
717
718#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
719IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
720#endif
721IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
722IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
723
724
725
726/**
727 * Sets the pass up status.
728 *
729 * @returns VINF_SUCCESS.
730 * @param pIemCpu The per CPU IEM state of the calling thread.
731 * @param rcPassUp The pass up status. Must be informational.
732 * VINF_SUCCESS is not allowed.
733 */
734IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
735{
736 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
737
738 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
739 if (rcOldPassUp == VINF_SUCCESS)
740 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
741 /* If both are EM scheduling codes, use EM priority rules. */
742 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
743 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
744 {
745 if (rcPassUp < rcOldPassUp)
746 {
747 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
748 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
749 }
750 else
751 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
752 }
753 /* Override EM scheduling with specific status code. */
754 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
755 {
756 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
757 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
758 }
759 /* Don't override specific status code, first come first served. */
760 else
761 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
762 return VINF_SUCCESS;
763}
764
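/*
 * Illustrative sketch only: callers typically fold an informational status
 * from PGM into the pass-up status and carry on as if the access succeeded,
 * mirroring the pattern used by the opcode prefetch code further down.
 */
#if 0 /* example */
if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
#endif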
765
766/**
767 * Calculates the CPU mode.
768 *
769 * This is mainly for updating IEMCPU::enmCpuMode.
770 *
771 * @returns CPU mode.
772 * @param pCtx The register context for the CPU.
773 */
774DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
775{
776 if (CPUMIsGuestIn64BitCodeEx(pCtx))
777 return IEMMODE_64BIT;
778 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
779 return IEMMODE_32BIT;
780 return IEMMODE_16BIT;
781}
782
783
784/**
785 * Initializes the execution state.
786 *
787 * @param pIemCpu The per CPU IEM state.
788 * @param fBypassHandlers Whether to bypass access handlers.
789 *
790 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
791 * side-effects in strict builds.
792 */
793DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
794{
795 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
796 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
797
798 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
799
800#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
801 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
808 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
809#endif
810
811#ifdef VBOX_WITH_RAW_MODE_NOT_R0
812 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
813#endif
814 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
815 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
816#ifdef VBOX_STRICT
817 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
818 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
819 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
820 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
821 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
822 pIemCpu->uRexReg = 127;
823 pIemCpu->uRexB = 127;
824 pIemCpu->uRexIndex = 127;
825 pIemCpu->iEffSeg = 127;
826 pIemCpu->offOpcode = 127;
827 pIemCpu->cbOpcode = 127;
828#endif
829
830 pIemCpu->cActiveMappings = 0;
831 pIemCpu->iNextMapping = 0;
832 pIemCpu->rcPassUp = VINF_SUCCESS;
833 pIemCpu->fBypassHandlers = fBypassHandlers;
834#ifdef VBOX_WITH_RAW_MODE_NOT_R0
835 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
836 && pCtx->cs.u64Base == 0
837 && pCtx->cs.u32Limit == UINT32_MAX
838 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
839 if (!pIemCpu->fInPatchCode)
840 CPUMRawLeave(pVCpu, VINF_SUCCESS);
841#endif
842
843#ifdef IEM_VERIFICATION_MODE_FULL
844 pIemCpu->fNoRemSavedByExec = pIemCpu->fNoRem;
845 pIemCpu->fNoRem = true;
846#endif
847}
848
849
850/**
851 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
852 *
853 * @param pIemCpu The per CPU IEM state.
854 */
855DECLINLINE(void) iemUninitExec(PIEMCPU pIemCpu)
856{
857 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
858#ifdef IEM_VERIFICATION_MODE_FULL
859 pIemCpu->fNoRem = pIemCpu->fNoRemSavedByExec;
860#endif
861#ifdef VBOX_STRICT
862 pIemCpu->cbOpcode = 0;
863#else
864 NOREF(pIemCpu);
865#endif
866}
867
868
869/**
870 * Initializes the decoder state.
871 *
872 * @param pIemCpu The per CPU IEM state.
873 * @param fBypassHandlers Whether to bypass access handlers.
874 */
875DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
876{
877 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
878 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
879
880 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
881
882#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
883 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
885 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
886 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
887 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
888 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
889 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
890 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
891#endif
892
893#ifdef VBOX_WITH_RAW_MODE_NOT_R0
894 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
895#endif
896 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
897#ifdef IEM_VERIFICATION_MODE_FULL
898 if (pIemCpu->uInjectCpl != UINT8_MAX)
899 pIemCpu->uCpl = pIemCpu->uInjectCpl;
900#endif
901 IEMMODE enmMode = iemCalcCpuMode(pCtx);
902 pIemCpu->enmCpuMode = enmMode;
903 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
904 pIemCpu->enmEffAddrMode = enmMode;
905 if (enmMode != IEMMODE_64BIT)
906 {
907 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
908 pIemCpu->enmEffOpSize = enmMode;
909 }
910 else
911 {
912 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
913 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
914 }
915 pIemCpu->fPrefixes = 0;
916 pIemCpu->uRexReg = 0;
917 pIemCpu->uRexB = 0;
918 pIemCpu->uRexIndex = 0;
919 pIemCpu->iEffSeg = X86_SREG_DS;
920 pIemCpu->offOpcode = 0;
921 pIemCpu->cbOpcode = 0;
922 pIemCpu->cActiveMappings = 0;
923 pIemCpu->iNextMapping = 0;
924 pIemCpu->rcPassUp = VINF_SUCCESS;
925 pIemCpu->fBypassHandlers = fBypassHandlers;
926#ifdef VBOX_WITH_RAW_MODE_NOT_R0
927 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
928 && pCtx->cs.u64Base == 0
929 && pCtx->cs.u32Limit == UINT32_MAX
930 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
931 if (!pIemCpu->fInPatchCode)
932 CPUMRawLeave(pVCpu, VINF_SUCCESS);
933#endif
934
935#ifdef DBGFTRACE_ENABLED
936 switch (enmMode)
937 {
938 case IEMMODE_64BIT:
939 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
940 break;
941 case IEMMODE_32BIT:
942 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
943 break;
944 case IEMMODE_16BIT:
945 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
946 break;
947 }
948#endif
949}
950
951
952/**
953 * Prefetch opcodes the first time when starting executing.
954 *
955 * @returns Strict VBox status code.
956 * @param pIemCpu The IEM state.
957 * @param fBypassHandlers Whether to bypass access handlers.
958 */
959IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
960{
961#ifdef IEM_VERIFICATION_MODE_FULL
962 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
963#endif
964 iemInitDecoder(pIemCpu, fBypassHandlers);
965
966 /*
967 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
968 *
969 * First translate CS:rIP to a physical address.
970 */
971 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
972 uint32_t cbToTryRead;
973 RTGCPTR GCPtrPC;
974 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
975 {
976 cbToTryRead = PAGE_SIZE;
977 GCPtrPC = pCtx->rip;
978 if (!IEM_IS_CANONICAL(GCPtrPC))
979 return iemRaiseGeneralProtectionFault0(pIemCpu);
980 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
981 }
982 else
983 {
984 uint32_t GCPtrPC32 = pCtx->eip;
985 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
986 if (GCPtrPC32 > pCtx->cs.u32Limit)
987 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
988 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
989 if (!cbToTryRead) /* overflowed */
990 {
991 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
992 cbToTryRead = UINT32_MAX;
993 }
994 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
995 Assert(GCPtrPC <= UINT32_MAX);
996 }
997
998#ifdef VBOX_WITH_RAW_MODE_NOT_R0
999 /* Allow interpretation of patch manager code blocks since they can for
1000 instance throw #PFs for perfectly good reasons. */
1001 if (pIemCpu->fInPatchCode)
1002 {
1003 size_t cbRead = 0;
1004 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
1005 AssertRCReturn(rc, rc);
1006 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1007 return VINF_SUCCESS;
1008 }
1009#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1010
1011 RTGCPHYS GCPhys;
1012 uint64_t fFlags;
1013 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
1014 if (RT_FAILURE(rc))
1015 {
1016 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1017 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1018 }
1019 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1020 {
1021 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1022 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1023 }
1024 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1025 {
1026 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1027 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1028 }
1029 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1030 /** @todo Check reserved bits and such stuff. PGM is better at doing
1031 * that, so do it when implementing the guest virtual address
1032 * TLB... */
1033
1034#ifdef IEM_VERIFICATION_MODE_FULL
1035 /*
1036 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1037 * instruction.
1038 */
1039 /** @todo optimize this differently by not using PGMPhysRead. */
1040 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1041 pIemCpu->GCPhysOpcodes = GCPhys;
1042 if ( offPrevOpcodes < cbOldOpcodes
1043 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1044 {
1045 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1046 Assert(cbNew <= RT_ELEMENTS(pIemCpu->abOpcode));
1047 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1048 pIemCpu->cbOpcode = cbNew;
1049 return VINF_SUCCESS;
1050 }
1051#endif
1052
1053 /*
1054 * Read the bytes at this address.
1055 */
1056 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1057#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1058 size_t cbActual;
1059 if ( PATMIsEnabled(pVM)
1060 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1061 {
1062 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1063 Assert(cbActual > 0);
1064 pIemCpu->cbOpcode = (uint8_t)cbActual;
1065 }
1066 else
1067#endif
1068 {
1069 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1070 if (cbToTryRead > cbLeftOnPage)
1071 cbToTryRead = cbLeftOnPage;
1072 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1073 cbToTryRead = sizeof(pIemCpu->abOpcode);
1074
1075 if (!pIemCpu->fBypassHandlers)
1076 {
1077 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1078 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1079 { /* likely */ }
1080 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1081 {
1082 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1083 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1084 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1085 }
1086 else
1087 {
1088 Log((RT_SUCCESS(rcStrict)
1089 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1090 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1091 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1092 return rcStrict;
1093 }
1094 }
1095 else
1096 {
1097 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1098 if (RT_SUCCESS(rc))
1099 { /* likely */ }
1100 else
1101 {
1102 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1103 GCPtrPC, GCPhys, cbToTryRead, rc));
1104 return rc;
1105 }
1106 }
1107 pIemCpu->cbOpcode = cbToTryRead;
1108 }
1109
1110 return VINF_SUCCESS;
1111}
1112
1113
1114/**
1115 * Tries to fetch at least @a cbMin bytes more opcodes, raising the appropriate
1116 * exception if it fails.
1117 *
1118 * @returns Strict VBox status code.
1119 * @param pIemCpu The IEM state.
1120 * @param cbMin The minimum number of bytes relative to offOpcode
1121 * that must be read.
1122 */
1123IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1124{
1125 /*
1126 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1127 *
1128 * First translate CS:rIP to a physical address.
1129 */
1130 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1131 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1132 uint32_t cbToTryRead;
1133 RTGCPTR GCPtrNext;
1134 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1135 {
1136 cbToTryRead = PAGE_SIZE;
1137 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1138 if (!IEM_IS_CANONICAL(GCPtrNext))
1139 return iemRaiseGeneralProtectionFault0(pIemCpu);
1140 }
1141 else
1142 {
1143 uint32_t GCPtrNext32 = pCtx->eip;
1144 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1145 GCPtrNext32 += pIemCpu->cbOpcode;
1146 if (GCPtrNext32 > pCtx->cs.u32Limit)
1147 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1148 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1149 if (!cbToTryRead) /* overflowed */
1150 {
1151 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1152 cbToTryRead = UINT32_MAX;
1153 /** @todo check out wrapping around the code segment. */
1154 }
1155 if (cbToTryRead < cbMin - cbLeft)
1156 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1157 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1158 }
1159
1160 /* Only read up to the end of the page, and make sure we don't read more
1161 than the opcode buffer can hold. */
1162 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1163 if (cbToTryRead > cbLeftOnPage)
1164 cbToTryRead = cbLeftOnPage;
1165 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1166 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1167/** @todo r=bird: Convert assertion into undefined opcode exception? */
1168 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1169
1170#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1171 /* Allow interpretation of patch manager code blocks since they can for
1172 instance throw #PFs for perfectly good reasons. */
1173 if (pIemCpu->fInPatchCode)
1174 {
1175 size_t cbRead = 0;
1176 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1177 AssertRCReturn(rc, rc);
1178 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1179 return VINF_SUCCESS;
1180 }
1181#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1182
1183 RTGCPHYS GCPhys;
1184 uint64_t fFlags;
1185 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1186 if (RT_FAILURE(rc))
1187 {
1188 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1189 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1190 }
1191 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1192 {
1193 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1194 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1195 }
1196 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1197 {
1198 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1199 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1200 }
1201 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1202 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1203 /** @todo Check reserved bits and such stuff. PGM is better at doing
1204 * that, so do it when implementing the guest virtual address
1205 * TLB... */
1206
1207 /*
1208 * Read the bytes at this address.
1209 *
1210 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1211 * and since PATM should only patch the start of an instruction there
1212 * should be no need to check again here.
1213 */
1214 if (!pIemCpu->fBypassHandlers)
1215 {
1216 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1217 cbToTryRead, PGMACCESSORIGIN_IEM);
1218 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1219 { /* likely */ }
1220 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1221 {
1222 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1223 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1224 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1225 }
1226 else
1227 {
1228 Log((RT_SUCCESS(rcStrict)
1229 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1230 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1231 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1232 return rcStrict;
1233 }
1234 }
1235 else
1236 {
1237 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1238 if (RT_SUCCESS(rc))
1239 { /* likely */ }
1240 else
1241 {
1242 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1243 return rc;
1244 }
1245 }
1246 pIemCpu->cbOpcode += cbToTryRead;
1247 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1248
1249 return VINF_SUCCESS;
1250}
1251
1252
1253/**
1254 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1255 *
1256 * @returns Strict VBox status code.
1257 * @param pIemCpu The IEM state.
1258 * @param pb Where to return the opcode byte.
1259 */
1260DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1261{
1262 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1263 if (rcStrict == VINF_SUCCESS)
1264 {
1265 uint8_t offOpcode = pIemCpu->offOpcode;
1266 *pb = pIemCpu->abOpcode[offOpcode];
1267 pIemCpu->offOpcode = offOpcode + 1;
1268 }
1269 else
1270 *pb = 0;
1271 return rcStrict;
1272}
1273
1274
1275/**
1276 * Fetches the next opcode byte.
1277 *
1278 * @returns Strict VBox status code.
1279 * @param pIemCpu The IEM state.
1280 * @param pu8 Where to return the opcode byte.
1281 */
1282DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1283{
1284 uint8_t const offOpcode = pIemCpu->offOpcode;
1285 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1286 {
1287 *pu8 = pIemCpu->abOpcode[offOpcode];
1288 pIemCpu->offOpcode = offOpcode + 1;
1289 return VINF_SUCCESS;
1290 }
1291 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1292}
1293
1294
1295/**
1296 * Fetches the next opcode byte, returns automatically on failure.
1297 *
1298 * @param a_pu8 Where to return the opcode byte.
1299 * @remark Implicitly references pIemCpu.
1300 */
1301#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1302 do \
1303 { \
1304 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1305 if (rcStrict2 != VINF_SUCCESS) \
1306 return rcStrict2; \
1307 } while (0)
1308
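/*
 * Illustrative sketch only (hypothetical handler): the macro returns from the
 * calling decoder function on any fetch failure, so instruction bodies read
 * linearly; iemOpCommonExample is the hypothetical worker sketched further up.
 */
#if 0 /* example */
FNIEMOP_DEF(iemOp_ExampleWithModRM)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* fetches the ModR/M byte or bails out */
    return FNIEMOP_CALL_1(iemOpCommonExample, bRm);
}
#endif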
1309
1310/**
1311 * Fetches the next signed byte from the opcode stream.
1312 *
1313 * @returns Strict VBox status code.
1314 * @param pIemCpu The IEM state.
1315 * @param pi8 Where to return the signed byte.
1316 */
1317DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1318{
1319 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1320}
1321
1322
1323/**
1324 * Fetches the next signed byte from the opcode stream, returning automatically
1325 * on failure.
1326 *
1327 * @param a_pi8 Where to return the signed byte.
1328 * @remark Implicitly references pIemCpu.
1329 */
1330#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1331 do \
1332 { \
1333 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1334 if (rcStrict2 != VINF_SUCCESS) \
1335 return rcStrict2; \
1336 } while (0)
1337
1338
1339/**
1340 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1341 *
1342 * @returns Strict VBox status code.
1343 * @param pIemCpu The IEM state.
1344 * @param pu16 Where to return the opcode dword.
1345 */
1346DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1347{
1348 uint8_t u8;
1349 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1350 if (rcStrict == VINF_SUCCESS)
1351 *pu16 = (int8_t)u8;
1352 return rcStrict;
1353}
1354
1355
1356/**
1357 * Fetches the next signed byte from the opcode stream, extending it to
1358 * unsigned 16-bit.
1359 *
1360 * @returns Strict VBox status code.
1361 * @param pIemCpu The IEM state.
1362 * @param pu16 Where to return the unsigned word.
1363 */
1364DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1365{
1366 uint8_t const offOpcode = pIemCpu->offOpcode;
1367 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1368 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1369
1370 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1371 pIemCpu->offOpcode = offOpcode + 1;
1372 return VINF_SUCCESS;
1373}
1374
1375
1376/**
1377 * Fetches the next signed byte from the opcode stream, sign-extending it to
1378 * a word, returning automatically on failure.
1379 *
1380 * @param a_pu16 Where to return the word.
1381 * @remark Implicitly references pIemCpu.
1382 */
1383#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1384 do \
1385 { \
1386 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1387 if (rcStrict2 != VINF_SUCCESS) \
1388 return rcStrict2; \
1389 } while (0)
1390
1391
1392/**
1393 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1394 *
1395 * @returns Strict VBox status code.
1396 * @param pIemCpu The IEM state.
1397 * @param pu32 Where to return the opcode dword.
1398 */
1399DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1400{
1401 uint8_t u8;
1402 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1403 if (rcStrict == VINF_SUCCESS)
1404 *pu32 = (int8_t)u8;
1405 return rcStrict;
1406}
1407
1408
1409/**
1410 * Fetches the next signed byte from the opcode stream, extending it to
1411 * unsigned 32-bit.
1412 *
1413 * @returns Strict VBox status code.
1414 * @param pIemCpu The IEM state.
1415 * @param pu32 Where to return the unsigned dword.
1416 */
1417DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1418{
1419 uint8_t const offOpcode = pIemCpu->offOpcode;
1420 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1421 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1422
1423 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1424 pIemCpu->offOpcode = offOpcode + 1;
1425 return VINF_SUCCESS;
1426}
1427
1428
1429/**
1430 * Fetches the next signed byte from the opcode stream, sign-extending it to
1431 * a double word, returning automatically on failure.
1432 *
1433 * @param a_pu32 Where to return the double word.
1434 * @remark Implicitly references pIemCpu.
1435 */
1436#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1437 do \
1438 { \
1439 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1440 if (rcStrict2 != VINF_SUCCESS) \
1441 return rcStrict2; \
1442 } while (0)
1443
1444
1445/**
1446 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1447 *
1448 * @returns Strict VBox status code.
1449 * @param pIemCpu The IEM state.
1450 * @param pu64 Where to return the opcode qword.
1451 */
1452DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1453{
1454 uint8_t u8;
1455 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1456 if (rcStrict == VINF_SUCCESS)
1457 *pu64 = (int8_t)u8;
1458 return rcStrict;
1459}
1460
1461
1462/**
1463 * Fetches the next signed byte from the opcode stream, extending it to
1464 * unsigned 64-bit.
1465 *
1466 * @returns Strict VBox status code.
1467 * @param pIemCpu The IEM state.
1468 * @param pu64 Where to return the unsigned qword.
1469 */
1470DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1471{
1472 uint8_t const offOpcode = pIemCpu->offOpcode;
1473 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1474 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1475
1476 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1477 pIemCpu->offOpcode = offOpcode + 1;
1478 return VINF_SUCCESS;
1479}
1480
1481
1482/**
1483 * Fetches the next signed byte from the opcode stream, sign-extending it to
1484 * a quad word, returning automatically on failure.
1485 *
1486 * @param a_pu64 Where to return the quad word.
1487 * @remark Implicitly references pIemCpu.
1488 */
1489#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1490 do \
1491 { \
1492 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1493 if (rcStrict2 != VINF_SUCCESS) \
1494 return rcStrict2; \
1495 } while (0)
1496
1497
1498/**
1499 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1500 *
1501 * @returns Strict VBox status code.
1502 * @param pIemCpu The IEM state.
1503 * @param pu16 Where to return the opcode word.
1504 */
1505DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1506{
1507 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1508 if (rcStrict == VINF_SUCCESS)
1509 {
1510 uint8_t offOpcode = pIemCpu->offOpcode;
1511 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1512 pIemCpu->offOpcode = offOpcode + 2;
1513 }
1514 else
1515 *pu16 = 0;
1516 return rcStrict;
1517}
1518
1519
1520/**
1521 * Fetches the next opcode word.
1522 *
1523 * @returns Strict VBox status code.
1524 * @param pIemCpu The IEM state.
1525 * @param pu16 Where to return the opcode word.
1526 */
1527DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1528{
1529 uint8_t const offOpcode = pIemCpu->offOpcode;
1530 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1531 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1532
1533 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1534 pIemCpu->offOpcode = offOpcode + 2;
1535 return VINF_SUCCESS;
1536}
1537
1538
1539/**
1540 * Fetches the next opcode word, returns automatically on failure.
1541 *
1542 * @param a_pu16 Where to return the opcode word.
1543 * @remark Implicitly references pIemCpu.
1544 */
1545#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1546 do \
1547 { \
1548 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1549 if (rcStrict2 != VINF_SUCCESS) \
1550 return rcStrict2; \
1551 } while (0)
1552
1553
1554/**
1555 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1556 *
1557 * @returns Strict VBox status code.
1558 * @param pIemCpu The IEM state.
1559 * @param pu32 Where to return the opcode double word.
1560 */
1561DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1562{
1563 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1564 if (rcStrict == VINF_SUCCESS)
1565 {
1566 uint8_t offOpcode = pIemCpu->offOpcode;
1567 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1568 pIemCpu->offOpcode = offOpcode + 2;
1569 }
1570 else
1571 *pu32 = 0;
1572 return rcStrict;
1573}
1574
1575
1576/**
1577 * Fetches the next opcode word, zero extending it to a double word.
1578 *
1579 * @returns Strict VBox status code.
1580 * @param pIemCpu The IEM state.
1581 * @param pu32 Where to return the opcode double word.
1582 */
1583DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1584{
1585 uint8_t const offOpcode = pIemCpu->offOpcode;
1586 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1587 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1588
1589 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1590 pIemCpu->offOpcode = offOpcode + 2;
1591 return VINF_SUCCESS;
1592}
1593
1594
1595/**
1596 * Fetches the next opcode word and zero extends it to a double word, returns
1597 * automatically on failure.
1598 *
1599 * @param a_pu32 Where to return the opcode double word.
1600 * @remark Implicitly references pIemCpu.
1601 */
1602#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1603 do \
1604 { \
1605 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1606 if (rcStrict2 != VINF_SUCCESS) \
1607 return rcStrict2; \
1608 } while (0)
1609
1610
1611/**
1612 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1613 *
1614 * @returns Strict VBox status code.
1615 * @param pIemCpu The IEM state.
1616 * @param pu64 Where to return the opcode quad word.
1617 */
1618DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1619{
1620 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1621 if (rcStrict == VINF_SUCCESS)
1622 {
1623 uint8_t offOpcode = pIemCpu->offOpcode;
1624 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1625 pIemCpu->offOpcode = offOpcode + 2;
1626 }
1627 else
1628 *pu64 = 0;
1629 return rcStrict;
1630}
1631
1632
1633/**
1634 * Fetches the next opcode word, zero extending it to a quad word.
1635 *
1636 * @returns Strict VBox status code.
1637 * @param pIemCpu The IEM state.
1638 * @param pu64 Where to return the opcode quad word.
1639 */
1640DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1641{
1642 uint8_t const offOpcode = pIemCpu->offOpcode;
1643 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1644 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1645
1646 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1647 pIemCpu->offOpcode = offOpcode + 2;
1648 return VINF_SUCCESS;
1649}
1650
1651
1652/**
1653 * Fetches the next opcode word and zero extends it to a quad word, returns
1654 * automatically on failure.
1655 *
1656 * @param a_pu64 Where to return the opcode quad word.
1657 * @remark Implicitly references pIemCpu.
1658 */
1659#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1660 do \
1661 { \
1662 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1663 if (rcStrict2 != VINF_SUCCESS) \
1664 return rcStrict2; \
1665 } while (0)
1666
1667
1668/**
1669 * Fetches the next signed word from the opcode stream.
1670 *
1671 * @returns Strict VBox status code.
1672 * @param pIemCpu The IEM state.
1673 * @param pi16 Where to return the signed word.
1674 */
1675DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1676{
1677 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1678}
1679
1680
1681/**
1682 * Fetches the next signed word from the opcode stream, returning automatically
1683 * on failure.
1684 *
1685 * @param a_pi16 Where to return the signed word.
1686 * @remark Implicitly references pIemCpu.
1687 */
1688#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1689 do \
1690 { \
1691 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1692 if (rcStrict2 != VINF_SUCCESS) \
1693 return rcStrict2; \
1694 } while (0)
1695
1696
1697/**
1698 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1699 *
1700 * @returns Strict VBox status code.
1701 * @param pIemCpu The IEM state.
1702 * @param pu32 Where to return the opcode dword.
1703 */
1704DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1705{
1706 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1707 if (rcStrict == VINF_SUCCESS)
1708 {
1709 uint8_t offOpcode = pIemCpu->offOpcode;
1710 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1711 pIemCpu->abOpcode[offOpcode + 1],
1712 pIemCpu->abOpcode[offOpcode + 2],
1713 pIemCpu->abOpcode[offOpcode + 3]);
1714 pIemCpu->offOpcode = offOpcode + 4;
1715 }
1716 else
1717 *pu32 = 0;
1718 return rcStrict;
1719}
1720
1721
1722/**
1723 * Fetches the next opcode dword.
1724 *
1725 * @returns Strict VBox status code.
1726 * @param pIemCpu The IEM state.
1727 * @param pu32 Where to return the opcode double word.
1728 */
1729DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1730{
1731 uint8_t const offOpcode = pIemCpu->offOpcode;
1732 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1733 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1734
1735 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1736 pIemCpu->abOpcode[offOpcode + 1],
1737 pIemCpu->abOpcode[offOpcode + 2],
1738 pIemCpu->abOpcode[offOpcode + 3]);
1739 pIemCpu->offOpcode = offOpcode + 4;
1740 return VINF_SUCCESS;
1741}
1742
1743
1744/**
1745 * Fetches the next opcode dword, returns automatically on failure.
1746 *
1747 * @param a_pu32 Where to return the opcode dword.
1748 * @remark Implicitly references pIemCpu.
1749 */
1750#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1751 do \
1752 { \
1753 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1754 if (rcStrict2 != VINF_SUCCESS) \
1755 return rcStrict2; \
1756 } while (0)
1757
1758
1759/**
1760 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1761 *
1762 * @returns Strict VBox status code.
1763 * @param pIemCpu The IEM state.
1764 * @param pu64 Where to return the opcode dword.
1765 */
1766DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1767{
1768 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1769 if (rcStrict == VINF_SUCCESS)
1770 {
1771 uint8_t offOpcode = pIemCpu->offOpcode;
1772 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1773 pIemCpu->abOpcode[offOpcode + 1],
1774 pIemCpu->abOpcode[offOpcode + 2],
1775 pIemCpu->abOpcode[offOpcode + 3]);
1776 pIemCpu->offOpcode = offOpcode + 4;
1777 }
1778 else
1779 *pu64 = 0;
1780 return rcStrict;
1781}
1782
1783
1784/**
1785 * Fetches the next opcode dword, zero extending it to a quad word.
1786 *
1787 * @returns Strict VBox status code.
1788 * @param pIemCpu The IEM state.
1789 * @param pu64 Where to return the opcode quad word.
1790 */
1791DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1792{
1793 uint8_t const offOpcode = pIemCpu->offOpcode;
1794 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1795 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1796
1797 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1798 pIemCpu->abOpcode[offOpcode + 1],
1799 pIemCpu->abOpcode[offOpcode + 2],
1800 pIemCpu->abOpcode[offOpcode + 3]);
1801 pIemCpu->offOpcode = offOpcode + 4;
1802 return VINF_SUCCESS;
1803}
1804
1805
1806/**
1807 * Fetches the next opcode dword and zero extends it to a quad word, returns
1808 * automatically on failure.
1809 *
1810 * @param a_pu64 Where to return the opcode quad word.
1811 * @remark Implicitly references pIemCpu.
1812 */
1813#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1814 do \
1815 { \
1816 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1817 if (rcStrict2 != VINF_SUCCESS) \
1818 return rcStrict2; \
1819 } while (0)
1820
1821
1822/**
1823 * Fetches the next signed double word from the opcode stream.
1824 *
1825 * @returns Strict VBox status code.
1826 * @param pIemCpu The IEM state.
1827 * @param pi32 Where to return the signed double word.
1828 */
1829DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1830{
1831 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1832}
1833
1834/**
1835 * Fetches the next signed double word from the opcode stream, returning
1836 * automatically on failure.
1837 *
1838 * @param a_pi32 Where to return the signed double word.
1839 * @remark Implicitly references pIemCpu.
1840 */
1841#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1842 do \
1843 { \
1844 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1845 if (rcStrict2 != VINF_SUCCESS) \
1846 return rcStrict2; \
1847 } while (0)
1848
1849
1850/**
1851 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1852 *
1853 * @returns Strict VBox status code.
1854 * @param pIemCpu The IEM state.
1855 * @param pu64 Where to return the opcode qword.
1856 */
1857DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1858{
1859 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1860 if (rcStrict == VINF_SUCCESS)
1861 {
1862 uint8_t offOpcode = pIemCpu->offOpcode;
1863 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1864 pIemCpu->abOpcode[offOpcode + 1],
1865 pIemCpu->abOpcode[offOpcode + 2],
1866 pIemCpu->abOpcode[offOpcode + 3]);
1867 pIemCpu->offOpcode = offOpcode + 4;
1868 }
1869 else
1870 *pu64 = 0;
1871 return rcStrict;
1872}
1873
1874
1875/**
1876 * Fetches the next opcode dword, sign extending it into a quad word.
1877 *
1878 * @returns Strict VBox status code.
1879 * @param pIemCpu The IEM state.
1880 * @param pu64 Where to return the opcode quad word.
1881 */
1882DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1883{
1884 uint8_t const offOpcode = pIemCpu->offOpcode;
1885 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1886 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1887
1888 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1889 pIemCpu->abOpcode[offOpcode + 1],
1890 pIemCpu->abOpcode[offOpcode + 2],
1891 pIemCpu->abOpcode[offOpcode + 3]);
1892 *pu64 = i32;
1893 pIemCpu->offOpcode = offOpcode + 4;
1894 return VINF_SUCCESS;
1895}
1896
1897
1898/**
1899 * Fetches the next opcode double word and sign extends it to a quad word,
1900 * returns automatically on failure.
1901 *
1902 * @param a_pu64 Where to return the opcode quad word.
1903 * @remark Implicitly references pIemCpu.
1904 */
1905#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1906 do \
1907 { \
1908 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1909 if (rcStrict2 != VINF_SUCCESS) \
1910 return rcStrict2; \
1911 } while (0)
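
/*
 * Worked example (illustrative): the sign extension above implements the
 * 64-bit mode rule that imm32/disp32 operands are sign-extended to 64 bits,
 * e.g. a fetched dword of 0x80000000 yields:
 *
 *      int32_t  i32 = (int32_t)UINT32_C(0x80000000);
 *      uint64_t u64 = i32;             // implicit sign extension
 *      Assert(u64 == UINT64_C(0xffffffff80000000));
 */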
1912
1913
1914/**
1915 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1916 *
1917 * @returns Strict VBox status code.
1918 * @param pIemCpu The IEM state.
1919 * @param pu64 Where to return the opcode qword.
1920 */
1921DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1922{
1923 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1924 if (rcStrict == VINF_SUCCESS)
1925 {
1926 uint8_t offOpcode = pIemCpu->offOpcode;
1927 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1928 pIemCpu->abOpcode[offOpcode + 1],
1929 pIemCpu->abOpcode[offOpcode + 2],
1930 pIemCpu->abOpcode[offOpcode + 3],
1931 pIemCpu->abOpcode[offOpcode + 4],
1932 pIemCpu->abOpcode[offOpcode + 5],
1933 pIemCpu->abOpcode[offOpcode + 6],
1934 pIemCpu->abOpcode[offOpcode + 7]);
1935 pIemCpu->offOpcode = offOpcode + 8;
1936 }
1937 else
1938 *pu64 = 0;
1939 return rcStrict;
1940}
1941
1942
1943/**
1944 * Fetches the next opcode qword.
1945 *
1946 * @returns Strict VBox status code.
1947 * @param pIemCpu The IEM state.
1948 * @param pu64 Where to return the opcode qword.
1949 */
1950DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1951{
1952 uint8_t const offOpcode = pIemCpu->offOpcode;
1953 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1954 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1955
1956 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1957 pIemCpu->abOpcode[offOpcode + 1],
1958 pIemCpu->abOpcode[offOpcode + 2],
1959 pIemCpu->abOpcode[offOpcode + 3],
1960 pIemCpu->abOpcode[offOpcode + 4],
1961 pIemCpu->abOpcode[offOpcode + 5],
1962 pIemCpu->abOpcode[offOpcode + 6],
1963 pIemCpu->abOpcode[offOpcode + 7]);
1964 pIemCpu->offOpcode = offOpcode + 8;
1965 return VINF_SUCCESS;
1966}
1967
1968
1969/**
1970 * Fetches the next opcode quad word, returns automatically on failure.
1971 *
1972 * @param a_pu64 Where to return the opcode quad word.
1973 * @remark Implicitly references pIemCpu.
1974 */
1975#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1976 do \
1977 { \
1978 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1979 if (rcStrict2 != VINF_SUCCESS) \
1980 return rcStrict2; \
1981 } while (0)
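
/*
 * Illustrative note: RT_MAKE_U64_FROM_U8 above assembles the opcode bytes
 * least significant byte first, i.e. the fetch is a little-endian load of
 * the instruction stream, equivalent to:
 *
 *      uint64_t u64 = (uint64_t)ab[0]
 *                   | ((uint64_t)ab[1] <<  8)
 *                   | ((uint64_t)ab[2] << 16)
 *                   | ((uint64_t)ab[3] << 24)
 *                   | ((uint64_t)ab[4] << 32)
 *                   | ((uint64_t)ab[5] << 40)
 *                   | ((uint64_t)ab[6] << 48)
 *                   | ((uint64_t)ab[7] << 56);
 */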
1982
1983
1984/** @name Misc Worker Functions.
1985 * @{
1986 */
1987
1988
1989/**
1990 * Validates a new SS segment.
1991 *
1992 * @returns VBox strict status code.
1993 * @param pIemCpu The IEM per CPU instance data.
1994 * @param pCtx The CPU context.
1995 * @param NewSS The new SS selector.
1996 * @param uCpl The CPL to load the stack for.
1997 * @param pDesc Where to return the descriptor.
1998 */
1999IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
2000{
2001 NOREF(pCtx);
2002
2003 /* Null selectors are not allowed (we're not called for dispatching
2004 interrupts with SS=0 in long mode). */
2005 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2006 {
2007 Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
2008 return iemRaiseTaskSwitchFault0(pIemCpu);
2009 }
2010
2011 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2012 if ((NewSS & X86_SEL_RPL) != uCpl)
2013 {
2014 Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
2015 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2016 }
2017
2018 /*
2019 * Read the descriptor.
2020 */
2021 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
2022 if (rcStrict != VINF_SUCCESS)
2023 return rcStrict;
2024
2025 /*
2026 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2027 */
2028 if (!pDesc->Legacy.Gen.u1DescType)
2029 {
2030 Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2031 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2032 }
2033
2034 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2035 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2036 {
2037 Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2038 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2039 }
2040 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2041 {
2042 Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2043 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2044 }
2045
2046 /* Is it there? */
2047 /** @todo testcase: Is this checked before the canonical / limit check below? */
2048 if (!pDesc->Legacy.Gen.u1Present)
2049 {
2050 Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
2051 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2052 }
2053
2054 return VINF_SUCCESS;
2055}
2056
2057
2058/**
2059 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2060 * not.
2061 *
2062 * @param a_pIemCpu The IEM per CPU data.
2063 * @param a_pCtx The CPU context.
2064 */
2065#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2066# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2067 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2068 ? (a_pCtx)->eflags.u \
2069 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2070#else
2071# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2072 ( (a_pCtx)->eflags.u )
2073#endif
2074
2075/**
2076 * Updates the EFLAGS in the correct manner wrt. PATM.
2077 *
2078 * @param a_pIemCpu The IEM per CPU data.
2079 * @param a_pCtx The CPU context.
2080 * @param a_fEfl The new EFLAGS.
2081 */
2082#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2083# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2084 do { \
2085 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2086 (a_pCtx)->eflags.u = (a_fEfl); \
2087 else \
2088 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2089 } while (0)
2090#else
2091# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2092 do { \
2093 (a_pCtx)->eflags.u = (a_fEfl); \
2094 } while (0)
2095#endif
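
/*
 * Usage sketch: the two macros above are meant to be used as a
 * read/modify/write pair so that PATM-managed flag bits stay consistent in
 * raw mode, e.g. (as done by the real-mode interrupt code further down):
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */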
2096
2097
2098/** @} */
2099
2100/** @name Raising Exceptions.
2101 *
2102 * @{
2103 */
2104
2105/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2106 * @{ */
2107/** CPU exception. */
2108#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2109/** External interrupt (from PIC, APIC, whatever). */
2110#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2111/** Software interrupt (int or into, not bound).
2112 * Returns to the following instruction. */
2113#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2114/** Takes an error code. */
2115#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2116/** Takes a CR2. */
2117#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2118/** Generated by the breakpoint instruction. */
2119#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2120/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2121#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2122/** @} */
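
/*
 * Illustrative flag combinations (sketch, not an exhaustive list): a CPU
 * exception that delivers an error code and a fault address, such as a page
 * fault, would be raised with something like
 *
 *      iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF,
 *                        IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
 *                        uErr, uCr2);
 *
 * with the error code and CR2 value as the last two arguments, whereas an
 * INT n instruction passes IEM_XCPT_FLAGS_T_SOFT_INT (no error code, no CR2)
 * so that the return address points at the following instruction.
 */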
2123
2124
2125/**
2126 * Loads the specified stack far pointer from the TSS.
2127 *
2128 * @returns VBox strict status code.
2129 * @param pIemCpu The IEM per CPU instance data.
2130 * @param pCtx The CPU context.
2131 * @param uCpl The CPL to load the stack for.
2132 * @param pSelSS Where to return the new stack segment.
2133 * @param puEsp Where to return the new stack pointer.
2134 */
2135IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2136 PRTSEL pSelSS, uint32_t *puEsp)
2137{
2138 VBOXSTRICTRC rcStrict;
2139 Assert(uCpl < 4);
2140
2141 switch (pCtx->tr.Attr.n.u4Type)
2142 {
2143 /*
2144 * 16-bit TSS (X86TSS16).
2145 */
2146 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2147 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2148 {
2149 uint32_t off = uCpl * 4 + 2;
2150 if (off + 4 <= pCtx->tr.u32Limit)
2151 {
2152 /** @todo check actual access pattern here. */
2153 uint32_t u32Tmp = 0; /* gcc maybe... */
2154 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2155 if (rcStrict == VINF_SUCCESS)
2156 {
2157 *puEsp = RT_LOWORD(u32Tmp);
2158 *pSelSS = RT_HIWORD(u32Tmp);
2159 return VINF_SUCCESS;
2160 }
2161 }
2162 else
2163 {
2164 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2165 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2166 }
2167 break;
2168 }
2169
2170 /*
2171 * 32-bit TSS (X86TSS32).
2172 */
2173 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2174 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2175 {
2176 uint32_t off = uCpl * 8 + 4;
2177 if (off + 7 <= pCtx->tr.u32Limit)
2178 {
2179/** @todo check actual access pattern here. */
2180 uint64_t u64Tmp;
2181 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2182 if (rcStrict == VINF_SUCCESS)
2183 {
2184 *puEsp = u64Tmp & UINT32_MAX;
2185 *pSelSS = (RTSEL)(u64Tmp >> 32);
2186 return VINF_SUCCESS;
2187 }
2188 }
2189 else
2190 {
2191 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2192 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2193 }
2194 break;
2195 }
2196
2197 default:
2198 AssertFailed();
2199 rcStrict = VERR_IEM_IPE_4;
2200 break;
2201 }
2202
2203 *puEsp = 0; /* make gcc happy */
2204 *pSelSS = 0; /* make gcc happy */
2205 return rcStrict;
2206}
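
/*
 * Worked example for the offsets used above: with uCpl=1 the 16-bit TSS case
 * reads the 4-byte SS:SP pair at offset 1*4+2 = 6 (SP word followed by the SS
 * word), while the 32-bit TSS case reads 8 bytes at offset 1*8+4 = 12, taking
 * ESP from the low dword and SS from the low word of the high dword.
 */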
2207
2208
2209/**
2210 * Loads the specified stack pointer from the 64-bit TSS.
2211 *
2212 * @returns VBox strict status code.
2213 * @param pIemCpu The IEM per CPU instance data.
2214 * @param pCtx The CPU context.
2215 * @param uCpl The CPL to load the stack for.
2216 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2217 * @param puRsp Where to return the new stack pointer.
2218 */
2219IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2220{
2221 Assert(uCpl < 4);
2222 Assert(uIst < 8);
2223 *puRsp = 0; /* make gcc happy */
2224
2225 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2226
2227 uint32_t off;
2228 if (uIst)
2229 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2230 else
2231 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2232 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2233 {
2234 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2235 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2236 }
2237
2238 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2239}
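
/*
 * Worked example (assuming the architectural 64-bit TSS layout, with rsp0 at
 * offset 4 and ist1 at offset 36): uCpl=2 with uIst=0 gives off = 2*8+4 = 20
 * (rsp2), whereas uIst=3 gives off = (3-1)*8+36 = 52 (ist3).
 */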
2240
2241
2242/**
2243 * Adjust the CPU state according to the exception being raised.
2244 *
2245 * @param pCtx The CPU context.
2246 * @param u8Vector The exception that has been raised.
2247 */
2248DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2249{
2250 switch (u8Vector)
2251 {
2252 case X86_XCPT_DB:
2253 pCtx->dr[7] &= ~X86_DR7_GD;
2254 break;
2255 /** @todo Read the AMD and Intel exception reference... */
2256 }
2257}
2258
2259
2260/**
2261 * Implements exceptions and interrupts for real mode.
2262 *
2263 * @returns VBox strict status code.
2264 * @param pIemCpu The IEM per CPU instance data.
2265 * @param pCtx The CPU context.
2266 * @param cbInstr The number of bytes to offset rIP by in the return
2267 * address.
2268 * @param u8Vector The interrupt / exception vector number.
2269 * @param fFlags The flags.
2270 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2271 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2272 */
2273IEM_STATIC VBOXSTRICTRC
2274iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2275 PCPUMCTX pCtx,
2276 uint8_t cbInstr,
2277 uint8_t u8Vector,
2278 uint32_t fFlags,
2279 uint16_t uErr,
2280 uint64_t uCr2)
2281{
2282 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2283 NOREF(uErr); NOREF(uCr2);
2284
2285 /*
2286 * Read the IDT entry.
2287 */
2288 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2289 {
2290 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2291 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2292 }
2293 RTFAR16 Idte;
2294 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2295 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2296 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2297 return rcStrict;
2298
2299 /*
2300 * Push the stack frame.
2301 */
2302 uint16_t *pu16Frame;
2303 uint64_t uNewRsp;
2304 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2305 if (rcStrict != VINF_SUCCESS)
2306 return rcStrict;
2307
2308 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2309#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2310 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2311 if (pIemCpu->uTargetCpu <= IEMTARGETCPU_186)
2312 fEfl |= UINT16_C(0xf000);
2313#endif
2314 pu16Frame[2] = (uint16_t)fEfl;
2315 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2316 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2317 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2318 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2319 return rcStrict;
2320
2321 /*
2322 * Load the vector address into cs:ip and make exception specific state
2323 * adjustments.
2324 */
2325 pCtx->cs.Sel = Idte.sel;
2326 pCtx->cs.ValidSel = Idte.sel;
2327 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2328 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2329 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2330 pCtx->rip = Idte.off;
2331 fEfl &= ~X86_EFL_IF;
2332 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2333
2334 /** @todo do we actually do this in real mode? */
2335 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2336 iemRaiseXcptAdjustState(pCtx, u8Vector);
2337
2338 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2339}
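
/*
 * Worked example: for INT 21h the code above reads the 4-byte real-mode IDT
 * entry at idtr.pIdt + 4*0x21 = +0x84 (a 16-bit offset followed by a 16-bit
 * segment) and pushes a 6-byte frame of FLAGS, CS and the return IP, with IP
 * ending up at the lowest stack address just as on real hardware.
 */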
2340
2341
2342/**
2343 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2344 *
2345 * @param pIemCpu The IEM per CPU instance data.
2346 * @param pSReg Pointer to the segment register.
2347 */
2348IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2349{
2350 pSReg->Sel = 0;
2351 pSReg->ValidSel = 0;
2352 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2353 {
2354 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
2355 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2356 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2357 }
2358 else
2359 {
2360 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2361 /** @todo check this on AMD-V */
2362 pSReg->u64Base = 0;
2363 pSReg->u32Limit = 0;
2364 }
2365}
2366
2367
2368/**
2369 * Loads a segment selector during a task switch in V8086 mode.
2370 *
2371 * @param pIemCpu The IEM per CPU instance data.
2372 * @param pSReg Pointer to the segment register.
2373 * @param uSel The selector value to load.
2374 */
2375IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2376{
2377 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2378 pSReg->Sel = uSel;
2379 pSReg->ValidSel = uSel;
2380 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2381 pSReg->u64Base = uSel << 4;
2382 pSReg->u32Limit = 0xffff;
2383 pSReg->Attr.u = 0xf3;
2384}
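
/*
 * Worked example: loading selector 0xb800 in V8086 mode yields base 0xb8000
 * (uSel << 4), limit 0xffff and attributes 0xf3, i.e. a present, DPL=3,
 * accessed read/write data segment; this is the fixed shape of all V86
 * segments.
 */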
2385
2386
2387/**
2388 * Loads a NULL data selector into a selector register, both the hidden and
2389 * visible parts, in protected mode.
2390 *
2391 * @param pIemCpu The IEM state of the calling EMT.
2392 * @param pSReg Pointer to the segment register.
2393 * @param uRpl The RPL.
2394 */
2395IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2396{
2397 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2398 * data selector in protected mode. */
2399 pSReg->Sel = uRpl;
2400 pSReg->ValidSel = uRpl;
2401 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2402 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2403 {
2404 /* VT-x (Intel 3960x) was observed doing something like this. */
2405 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2406 pSReg->u32Limit = UINT32_MAX;
2407 pSReg->u64Base = 0;
2408 }
2409 else
2410 {
2411 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2412 pSReg->u32Limit = 0;
2413 pSReg->u64Base = 0;
2414 }
2415}
2416
2417
2418/**
2419 * Loads a segment selector during a task switch in protected mode.
2420 *
2421 * In this task switch scenario, we would throw \#TS exceptions rather than
2422 * \#GPs.
2423 *
2424 * @returns VBox strict status code.
2425 * @param pIemCpu The IEM per CPU instance data.
2426 * @param pSReg Pointer to the segment register.
2427 * @param uSel The new selector value.
2428 *
2429 * @remarks This does _not_ handle CS or SS.
2430 * @remarks This expects pIemCpu->uCpl to be up to date.
2431 */
2432IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2433{
2434 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2435
2436 /* Null data selector. */
2437 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2438 {
2439 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2440 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2441 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2442 return VINF_SUCCESS;
2443 }
2444
2445 /* Fetch the descriptor. */
2446 IEMSELDESC Desc;
2447 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2448 if (rcStrict != VINF_SUCCESS)
2449 {
2450 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2451 VBOXSTRICTRC_VAL(rcStrict)));
2452 return rcStrict;
2453 }
2454
2455 /* Must be a data segment or readable code segment. */
2456 if ( !Desc.Legacy.Gen.u1DescType
2457 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2458 {
2459 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2460 Desc.Legacy.Gen.u4Type));
2461 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2462 }
2463
2464 /* Check privileges for data segments and non-conforming code segments. */
2465 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2466 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2467 {
2468 /* The RPL and the new CPL must be less than or equal to the DPL. */
2469 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2470 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2471 {
2472 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2473 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2474 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2475 }
2476 }
2477
2478 /* Is it there? */
2479 if (!Desc.Legacy.Gen.u1Present)
2480 {
2481 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2482 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2483 }
2484
2485 /* The base and limit. */
2486 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2487 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2488
2489 /*
2490 * Ok, everything checked out fine. Now set the accessed bit before
2491 * committing the result into the registers.
2492 */
2493 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2494 {
2495 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2496 if (rcStrict != VINF_SUCCESS)
2497 return rcStrict;
2498 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2499 }
2500
2501 /* Commit */
2502 pSReg->Sel = uSel;
2503 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2504 pSReg->u32Limit = cbLimit;
2505 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2506 pSReg->ValidSel = uSel;
2507 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2508 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2509 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2510
2511 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2512 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2513 return VINF_SUCCESS;
2514}
2515
2516
2517/**
2518 * Performs a task switch.
2519 *
2520 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2521 * caller is responsible for performing the necessary checks (like DPL, TSS
2522 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2523 * reference for JMP, CALL, IRET.
2524 *
2525 * If the task switch is due to a software interrupt or hardware exception,
2526 * the caller is responsible for validating the TSS selector and descriptor. See
2527 * Intel Instruction reference for INT n.
2528 *
2529 * @returns VBox strict status code.
2530 * @param pIemCpu The IEM per CPU instance data.
2531 * @param pCtx The CPU context.
2532 * @param enmTaskSwitch What caused this task switch.
2533 * @param uNextEip The EIP effective after the task switch.
2534 * @param fFlags The flags.
2535 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2536 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2537 * @param SelTSS The TSS selector of the new task.
2538 * @param pNewDescTSS Pointer to the new TSS descriptor.
2539 */
2540IEM_STATIC VBOXSTRICTRC
2541iemTaskSwitch(PIEMCPU pIemCpu,
2542 PCPUMCTX pCtx,
2543 IEMTASKSWITCH enmTaskSwitch,
2544 uint32_t uNextEip,
2545 uint32_t fFlags,
2546 uint16_t uErr,
2547 uint64_t uCr2,
2548 RTSEL SelTSS,
2549 PIEMSELDESC pNewDescTSS)
2550{
2551 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2552 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2553
2554 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2555 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2556 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2557 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2558 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2559
2560 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2561 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2562
2563 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2564 fIsNewTSS386, pCtx->eip, uNextEip));
2565
2566 /* Update CR2 in case it's a page-fault. */
2567 /** @todo This should probably be done much earlier in IEM/PGM. See
2568 * @bugref{5653#c49}. */
2569 if (fFlags & IEM_XCPT_FLAGS_CR2)
2570 pCtx->cr2 = uCr2;
2571
2572 /*
2573 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2574 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2575 */
2576 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2577 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2578 if (uNewTSSLimit < uNewTSSLimitMin)
2579 {
2580 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2581 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2582 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2583 }
2584
2585 /*
2586 * Check the current TSS limit. The last bytes written to the current TSS during the
2587 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2588 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2589 *
2590 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2591 * end up with smaller than "legal" TSS limits.
2592 */
2593 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2594 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2595 if (uCurTSSLimit < uCurTSSLimitMin)
2596 {
2597 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2598 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2599 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2600 }
2601
2602 /*
2603 * Verify that the new TSS can be accessed and map it. Map only the required contents
2604 * and not the entire TSS.
2605 */
2606 void *pvNewTSS;
2607 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2608 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2609 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2610 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2611 * not perform correct translation if this happens. See Intel spec. 7.2.1
2612 * "Task-State Segment" */
2613 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2614 if (rcStrict != VINF_SUCCESS)
2615 {
2616 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2617 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2618 return rcStrict;
2619 }
2620
2621 /*
2622 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2623 */
2624 uint32_t u32EFlags = pCtx->eflags.u32;
2625 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2626 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2627 {
2628 PX86DESC pDescCurTSS;
2629 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2630 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2631 if (rcStrict != VINF_SUCCESS)
2632 {
2633 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2634 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2635 return rcStrict;
2636 }
2637
2638 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2639 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2640 if (rcStrict != VINF_SUCCESS)
2641 {
2642 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2643 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2644 return rcStrict;
2645 }
2646
2647 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2648 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2649 {
2650 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2651 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2652 u32EFlags &= ~X86_EFL_NT;
2653 }
2654 }
2655
2656 /*
2657 * Save the CPU state into the current TSS.
2658 */
2659 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2660 if (GCPtrNewTSS == GCPtrCurTSS)
2661 {
2662 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2663 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2664 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2665 }
2666 if (fIsNewTSS386)
2667 {
2668 /*
2669 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2670 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2671 */
2672 void *pvCurTSS32;
2673 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2674 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2675 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2676 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2677 if (rcStrict != VINF_SUCCESS)
2678 {
2679 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2680 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2681 return rcStrict;
2682 }
2683
2684 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
2685 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2686 pCurTSS32->eip = uNextEip;
2687 pCurTSS32->eflags = u32EFlags;
2688 pCurTSS32->eax = pCtx->eax;
2689 pCurTSS32->ecx = pCtx->ecx;
2690 pCurTSS32->edx = pCtx->edx;
2691 pCurTSS32->ebx = pCtx->ebx;
2692 pCurTSS32->esp = pCtx->esp;
2693 pCurTSS32->ebp = pCtx->ebp;
2694 pCurTSS32->esi = pCtx->esi;
2695 pCurTSS32->edi = pCtx->edi;
2696 pCurTSS32->es = pCtx->es.Sel;
2697 pCurTSS32->cs = pCtx->cs.Sel;
2698 pCurTSS32->ss = pCtx->ss.Sel;
2699 pCurTSS32->ds = pCtx->ds.Sel;
2700 pCurTSS32->fs = pCtx->fs.Sel;
2701 pCurTSS32->gs = pCtx->gs.Sel;
2702
2703 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2704 if (rcStrict != VINF_SUCCESS)
2705 {
2706 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2707 VBOXSTRICTRC_VAL(rcStrict)));
2708 return rcStrict;
2709 }
2710 }
2711 else
2712 {
2713 /*
2714 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2715 */
2716 void *pvCurTSS16;
2717 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2718 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2719 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2720 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2721 if (rcStrict != VINF_SUCCESS)
2722 {
2723 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2724 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2725 return rcStrict;
2726 }
2727
2728 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
2729 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2730 pCurTSS16->ip = uNextEip;
2731 pCurTSS16->flags = u32EFlags;
2732 pCurTSS16->ax = pCtx->ax;
2733 pCurTSS16->cx = pCtx->cx;
2734 pCurTSS16->dx = pCtx->dx;
2735 pCurTSS16->bx = pCtx->bx;
2736 pCurTSS16->sp = pCtx->sp;
2737 pCurTSS16->bp = pCtx->bp;
2738 pCurTSS16->si = pCtx->si;
2739 pCurTSS16->di = pCtx->di;
2740 pCurTSS16->es = pCtx->es.Sel;
2741 pCurTSS16->cs = pCtx->cs.Sel;
2742 pCurTSS16->ss = pCtx->ss.Sel;
2743 pCurTSS16->ds = pCtx->ds.Sel;
2744
2745 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2746 if (rcStrict != VINF_SUCCESS)
2747 {
2748 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2749 VBOXSTRICTRC_VAL(rcStrict)));
2750 return rcStrict;
2751 }
2752 }
2753
2754 /*
2755 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2756 */
2757 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2758 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2759 {
2760 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2761 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2762 pNewTSS->selPrev = pCtx->tr.Sel;
2763 }
2764
2765 /*
2766 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2767 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2768 */
2769 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2770 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2771 bool fNewDebugTrap;
2772 if (fIsNewTSS386)
2773 {
2774 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2775 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2776 uNewEip = pNewTSS32->eip;
2777 uNewEflags = pNewTSS32->eflags;
2778 uNewEax = pNewTSS32->eax;
2779 uNewEcx = pNewTSS32->ecx;
2780 uNewEdx = pNewTSS32->edx;
2781 uNewEbx = pNewTSS32->ebx;
2782 uNewEsp = pNewTSS32->esp;
2783 uNewEbp = pNewTSS32->ebp;
2784 uNewEsi = pNewTSS32->esi;
2785 uNewEdi = pNewTSS32->edi;
2786 uNewES = pNewTSS32->es;
2787 uNewCS = pNewTSS32->cs;
2788 uNewSS = pNewTSS32->ss;
2789 uNewDS = pNewTSS32->ds;
2790 uNewFS = pNewTSS32->fs;
2791 uNewGS = pNewTSS32->gs;
2792 uNewLdt = pNewTSS32->selLdt;
2793 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2794 }
2795 else
2796 {
2797 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2798 uNewCr3 = 0;
2799 uNewEip = pNewTSS16->ip;
2800 uNewEflags = pNewTSS16->flags;
2801 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2802 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2803 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2804 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2805 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2806 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2807 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2808 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2809 uNewES = pNewTSS16->es;
2810 uNewCS = pNewTSS16->cs;
2811 uNewSS = pNewTSS16->ss;
2812 uNewDS = pNewTSS16->ds;
2813 uNewFS = 0;
2814 uNewGS = 0;
2815 uNewLdt = pNewTSS16->selLdt;
2816 fNewDebugTrap = false;
2817 }
2818
2819 if (GCPtrNewTSS == GCPtrCurTSS)
2820 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2821 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2822
2823 /*
2824 * We're done accessing the new TSS.
2825 */
2826 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2827 if (rcStrict != VINF_SUCCESS)
2828 {
2829 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2830 return rcStrict;
2831 }
2832
2833 /*
2834 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2835 */
2836 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2837 {
2838 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2839 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2840 if (rcStrict != VINF_SUCCESS)
2841 {
2842 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2843 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2844 return rcStrict;
2845 }
2846
2847 /* Check that the descriptor indicates the new TSS is available (not busy). */
2848 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2849 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2850 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2851
2852 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2853 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2854 if (rcStrict != VINF_SUCCESS)
2855 {
2856 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2857 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2858 return rcStrict;
2859 }
2860 }
2861
2862 /*
2863 * From this point on, we're technically in the new task. Exceptions raised from here are
2864 * regarded as occurring after the task switch has completed, but before any instruction of the new task has executed.
2865 */
2866 pCtx->tr.Sel = SelTSS;
2867 pCtx->tr.ValidSel = SelTSS;
2868 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2869 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2870 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2871 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2872 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2873
2874 /* Set the busy bit in TR. */
2875 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2876 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2877 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2878 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2879 {
2880 uNewEflags |= X86_EFL_NT;
2881 }
2882
2883 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2884 pCtx->cr0 |= X86_CR0_TS;
2885 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2886
2887 pCtx->eip = uNewEip;
2888 pCtx->eax = uNewEax;
2889 pCtx->ecx = uNewEcx;
2890 pCtx->edx = uNewEdx;
2891 pCtx->ebx = uNewEbx;
2892 pCtx->esp = uNewEsp;
2893 pCtx->ebp = uNewEbp;
2894 pCtx->esi = uNewEsi;
2895 pCtx->edi = uNewEdi;
2896
2897 uNewEflags &= X86_EFL_LIVE_MASK;
2898 uNewEflags |= X86_EFL_RA1_MASK;
2899 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2900
2901 /*
2902 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2903 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2904 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
2905 */
2906 pCtx->es.Sel = uNewES;
2907 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2908 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2909
2910 pCtx->cs.Sel = uNewCS;
2911 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2912 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2913
2914 pCtx->ss.Sel = uNewSS;
2915 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2916 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2917
2918 pCtx->ds.Sel = uNewDS;
2919 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2920 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2921
2922 pCtx->fs.Sel = uNewFS;
2923 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2924 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2925
2926 pCtx->gs.Sel = uNewGS;
2927 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2928 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2929 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2930
2931 pCtx->ldtr.Sel = uNewLdt;
2932 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2933 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2934 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2935
2936 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2937 {
2938 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2939 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2940 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2941 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2942 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2943 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2944 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2945 }
2946
2947 /*
2948 * Switch CR3 for the new task.
2949 */
2950 if ( fIsNewTSS386
2951 && (pCtx->cr0 & X86_CR0_PG))
2952 {
2953 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2954 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2955 {
2956 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2957 AssertRCSuccessReturn(rc, rc);
2958 }
2959 else
2960 pCtx->cr3 = uNewCr3;
2961
2962 /* Inform PGM. */
2963 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2964 {
2965 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2966 AssertRCReturn(rc, rc);
2967 /* ignore informational status codes */
2968 }
2969 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2970 }
2971
2972 /*
2973 * Switch LDTR for the new task.
2974 */
2975 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2976 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2977 else
2978 {
2979 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2980
2981 IEMSELDESC DescNewLdt;
2982 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2983 if (rcStrict != VINF_SUCCESS)
2984 {
2985 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2986 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2987 return rcStrict;
2988 }
2989 if ( !DescNewLdt.Legacy.Gen.u1Present
2990 || DescNewLdt.Legacy.Gen.u1DescType
2991 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2992 {
2993 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2994 uNewLdt, DescNewLdt.Legacy.u));
2995 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2996 }
2997
2998 pCtx->ldtr.ValidSel = uNewLdt;
2999 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3000 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3001 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3002 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3003 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3004 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3005 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
3006 }
3007
3008 IEMSELDESC DescSS;
3009 if (IEM_IS_V86_MODE(pIemCpu))
3010 {
3011 pIemCpu->uCpl = 3;
3012 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
3013 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
3014 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
3015 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
3016 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
3017 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
3018 }
3019 else
3020 {
3021 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3022
3023 /*
3024 * Load the stack segment for the new task.
3025 */
3026 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3027 {
3028 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3029 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3030 }
3031
3032 /* Fetch the descriptor. */
3033 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
3034 if (rcStrict != VINF_SUCCESS)
3035 {
3036 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3037 VBOXSTRICTRC_VAL(rcStrict)));
3038 return rcStrict;
3039 }
3040
3041 /* SS must be a data segment and writable. */
3042 if ( !DescSS.Legacy.Gen.u1DescType
3043 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3044 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3045 {
3046 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3047 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3048 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3049 }
3050
3051 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3052 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3053 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3054 {
3055 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3056 uNewCpl));
3057 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3058 }
3059
3060 /* Is it there? */
3061 if (!DescSS.Legacy.Gen.u1Present)
3062 {
3063 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3064 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3065 }
3066
3067 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3068 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3069
3070 /* Set the accessed bit before committing the result into SS. */
3071 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3072 {
3073 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3074 if (rcStrict != VINF_SUCCESS)
3075 return rcStrict;
3076 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3077 }
3078
3079 /* Commit SS. */
3080 pCtx->ss.Sel = uNewSS;
3081 pCtx->ss.ValidSel = uNewSS;
3082 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3083 pCtx->ss.u32Limit = cbLimit;
3084 pCtx->ss.u64Base = u64Base;
3085 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3086 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3087
3088 /* CPL has changed, update IEM before loading rest of segments. */
3089 pIemCpu->uCpl = uNewCpl;
3090
3091 /*
3092 * Load the data segments for the new task.
3093 */
3094 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3095 if (rcStrict != VINF_SUCCESS)
3096 return rcStrict;
3097 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3098 if (rcStrict != VINF_SUCCESS)
3099 return rcStrict;
3100 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3101 if (rcStrict != VINF_SUCCESS)
3102 return rcStrict;
3103 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3104 if (rcStrict != VINF_SUCCESS)
3105 return rcStrict;
3106
3107 /*
3108 * Load the code segment for the new task.
3109 */
3110 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3111 {
3112 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3113 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3114 }
3115
3116 /* Fetch the descriptor. */
3117 IEMSELDESC DescCS;
3118 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3119 if (rcStrict != VINF_SUCCESS)
3120 {
3121 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3122 return rcStrict;
3123 }
3124
3125 /* CS must be a code segment. */
3126 if ( !DescCS.Legacy.Gen.u1DescType
3127 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3128 {
3129 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3130 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3131 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3132 }
3133
3134 /* For conforming CS, DPL must be less than or equal to the RPL. */
3135 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3136 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3137 {
3138            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3139 DescCS.Legacy.Gen.u2Dpl));
3140 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3141 }
3142
3143 /* For non-conforming CS, DPL must match RPL. */
3144 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3145 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3146 {
3147            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3148 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3149 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3150 }
3151
3152 /* Is it there? */
3153 if (!DescCS.Legacy.Gen.u1Present)
3154 {
3155 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3156 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3157 }
3158
3159 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3160 u64Base = X86DESC_BASE(&DescCS.Legacy);
3161
3162 /* Set the accessed bit before committing the result into CS. */
3163 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3164 {
3165 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3166 if (rcStrict != VINF_SUCCESS)
3167 return rcStrict;
3168 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3169 }
3170
3171 /* Commit CS. */
3172 pCtx->cs.Sel = uNewCS;
3173 pCtx->cs.ValidSel = uNewCS;
3174 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3175 pCtx->cs.u32Limit = cbLimit;
3176 pCtx->cs.u64Base = u64Base;
3177 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3178 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3179 }
3180
3181 /** @todo Debug trap. */
3182 if (fIsNewTSS386 && fNewDebugTrap)
3183 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3184
3185 /*
3186 * Construct the error code masks based on what caused this task switch.
3187 * See Intel Instruction reference for INT.
3188 */
3189 uint16_t uExt;
3190 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3191 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3192 {
3193 uExt = 1;
3194 }
3195 else
3196 uExt = 0;
3197
3198 /*
3199 * Push any error code on to the new stack.
3200 */
3201 if (fFlags & IEM_XCPT_FLAGS_ERR)
3202 {
3203 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3204 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3205 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3206
3207 /* Check that there is sufficient space on the stack. */
3208 /** @todo Factor out segment limit checking for normal/expand down segments
3209 * into a separate function. */
3210 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3211 {
3212 if ( pCtx->esp - 1 > cbLimitSS
3213 || pCtx->esp < cbStackFrame)
3214 {
3215 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3216 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3217 cbStackFrame));
3218 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3219 }
3220 }
3221 else
3222 {
3223 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3224 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3225 {
3226 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3227 cbStackFrame));
3228 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3229 }
3230 }
3231
3232
3233 if (fIsNewTSS386)
3234 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3235 else
3236 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3237 if (rcStrict != VINF_SUCCESS)
3238 {
3239 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3240 VBOXSTRICTRC_VAL(rcStrict)));
3241 return rcStrict;
3242 }
3243 }
3244
3245 /* Check the new EIP against the new CS limit. */
3246 if (pCtx->eip > pCtx->cs.u32Limit)
3247 {
3248        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3249 pCtx->eip, pCtx->cs.u32Limit));
3250 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3251 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3252 }
3253
3254 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3255 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3256}
3257
3258
3259/**
3260 * Implements exceptions and interrupts for protected mode.
3261 *
3262 * @returns VBox strict status code.
3263 * @param pIemCpu The IEM per CPU instance data.
3264 * @param pCtx The CPU context.
3265 * @param cbInstr The number of bytes to offset rIP by in the return
3266 * address.
3267 * @param u8Vector The interrupt / exception vector number.
3268 * @param fFlags The flags.
3269 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3270 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3271 */
3272IEM_STATIC VBOXSTRICTRC
3273iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3274 PCPUMCTX pCtx,
3275 uint8_t cbInstr,
3276 uint8_t u8Vector,
3277 uint32_t fFlags,
3278 uint16_t uErr,
3279 uint64_t uCr2)
3280{
3281 /*
3282 * Read the IDT entry.
3283 */
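        /* Protected mode IDT entries are 8 bytes each; make sure the whole descriptor for this vector lies within the IDT limit. */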
3284 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3285 {
3286 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3287 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3288 }
3289 X86DESC Idte;
3290 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3291 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3292 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3293 return rcStrict;
3294 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3295 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3296 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3297
3298 /*
3299 * Check the descriptor type, DPL and such.
3300 * ASSUMES this is done in the same order as described for call-gate calls.
3301 */
3302 if (Idte.Gate.u1DescType)
3303 {
3304 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3305 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3306 }
3307 bool fTaskGate = false;
3308 uint8_t f32BitGate = true;
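        /* Note: f32BitGate is kept as 0/1 so it can be used directly as a shift count when sizing the 16-bit vs. 32-bit gate stack frames further down. */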
3309 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3310 switch (Idte.Gate.u4Type)
3311 {
3312 case X86_SEL_TYPE_SYS_UNDEFINED:
3313 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3314 case X86_SEL_TYPE_SYS_LDT:
3315 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3316 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3317 case X86_SEL_TYPE_SYS_UNDEFINED2:
3318 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3319 case X86_SEL_TYPE_SYS_UNDEFINED3:
3320 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3321 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3322 case X86_SEL_TYPE_SYS_UNDEFINED4:
3323 {
3324 /** @todo check what actually happens when the type is wrong...
3325 * esp. call gates. */
3326 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3327 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3328 }
3329
3330 case X86_SEL_TYPE_SYS_286_INT_GATE:
3331 f32BitGate = false;
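                /* fall thru */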
3332 case X86_SEL_TYPE_SYS_386_INT_GATE:
3333 fEflToClear |= X86_EFL_IF;
3334 break;
3335
3336 case X86_SEL_TYPE_SYS_TASK_GATE:
3337 fTaskGate = true;
3338#ifndef IEM_IMPLEMENTS_TASKSWITCH
3339 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3340#endif
3341 break;
3342
3343 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3344 f32BitGate = false;
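                /* fall thru */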
3345 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3346 break;
3347
3348 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3349 }
3350
3351 /* Check DPL against CPL if applicable. */
3352 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3353 {
3354 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3355 {
3356 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3357 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3358 }
3359 }
3360
3361 /* Is it there? */
3362 if (!Idte.Gate.u1Present)
3363 {
3364 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3365 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3366 }
3367
3368 /* Is it a task-gate? */
3369 if (fTaskGate)
3370 {
3371 /*
3372 * Construct the error code masks based on what caused this task switch.
3373 * See Intel Instruction reference for INT.
3374 */
3375 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
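            /* The EXT bit is set when the event did not originate from a software INT/INT3/INTO, i.e. it is external to the interrupted program. */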
3376 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3377 RTSEL SelTSS = Idte.Gate.u16Sel;
3378
3379 /*
3380 * Fetch the TSS descriptor in the GDT.
3381 */
3382 IEMSELDESC DescTSS;
3383 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3384 if (rcStrict != VINF_SUCCESS)
3385 {
3386 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3387 VBOXSTRICTRC_VAL(rcStrict)));
3388 return rcStrict;
3389 }
3390
3391 /* The TSS descriptor must be a system segment and be available (not busy). */
3392 if ( DescTSS.Legacy.Gen.u1DescType
3393 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3394 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3395 {
3396 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3397 u8Vector, SelTSS, DescTSS.Legacy.au64));
3398 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3399 }
3400
3401 /* The TSS must be present. */
3402 if (!DescTSS.Legacy.Gen.u1Present)
3403 {
3404 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3405 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3406 }
3407
3408 /* Do the actual task switch. */
3409 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3410 }
3411
3412 /* A null CS is bad. */
3413 RTSEL NewCS = Idte.Gate.u16Sel;
3414 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3415 {
3416 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3417 return iemRaiseGeneralProtectionFault0(pIemCpu);
3418 }
3419
3420 /* Fetch the descriptor for the new CS. */
3421 IEMSELDESC DescCS;
3422 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3423 if (rcStrict != VINF_SUCCESS)
3424 {
3425 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3426 return rcStrict;
3427 }
3428
3429 /* Must be a code segment. */
3430 if (!DescCS.Legacy.Gen.u1DescType)
3431 {
3432 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3433 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3434 }
3435 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3436 {
3437 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3438 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3439 }
3440
3441 /* Don't allow lowering the privilege level. */
3442 /** @todo Does the lowering of privileges apply to software interrupts
3443 * only? This has bearings on the more-privileged or
3444 * same-privilege stack behavior further down. A testcase would
3445 * be nice. */
3446 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3447 {
3448 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3449 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3450 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3451 }
3452
3453 /* Make sure the selector is present. */
3454 if (!DescCS.Legacy.Gen.u1Present)
3455 {
3456 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3457 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3458 }
3459
3460 /* Check the new EIP against the new CS limit. */
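        /* 286 gates only carry a 16-bit offset; 386 gates combine the low and high offset words. */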
3461 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3462 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3463 ? Idte.Gate.u16OffsetLow
3464 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3465 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3466 if (uNewEip > cbLimitCS)
3467 {
3468 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3469 u8Vector, uNewEip, cbLimitCS, NewCS));
3470 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3471 }
3472
3473 /* Calc the flag image to push. */
3474 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3475 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3476 fEfl &= ~X86_EFL_RF;
3477 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3478 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3479
3480 /* From V8086 mode only go to CPL 0. */
3481 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3482 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3483 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3484 {
3485 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3486 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3487 }
3488
3489 /*
3490 * If the privilege level changes, we need to get a new stack from the TSS.
3491 * This in turns means validating the new SS and ESP...
3492 */
3493 if (uNewCpl != pIemCpu->uCpl)
3494 {
3495 RTSEL NewSS;
3496 uint32_t uNewEsp;
3497 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3498 if (rcStrict != VINF_SUCCESS)
3499 return rcStrict;
3500
3501 IEMSELDESC DescSS;
3502 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3503 if (rcStrict != VINF_SUCCESS)
3504 return rcStrict;
3505
3506 /* Check that there is sufficient space for the stack frame. */
3507 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
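            /* Frame: [error code,] (E)IP, CS, (E)FLAGS, (E)SP, SS, plus ES, DS, FS and GS when coming from V8086 mode; each entry is 2 or 4 bytes depending on the gate size. */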
3508 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3509 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3510 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3511
3512 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3513 {
3514 if ( uNewEsp - 1 > cbLimitSS
3515 || uNewEsp < cbStackFrame)
3516 {
3517 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3518 u8Vector, NewSS, uNewEsp, cbStackFrame));
3519 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3520 }
3521 }
3522 else
3523 {
3524 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3525 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3526 {
3527 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3528 u8Vector, NewSS, uNewEsp, cbStackFrame));
3529 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3530 }
3531 }
3532
3533 /*
3534 * Start making changes.
3535 */
3536
3537 /* Set the new CPL so that stack accesses use it. */
3538 uint8_t const uOldCpl = pIemCpu->uCpl;
3539 pIemCpu->uCpl = uNewCpl;
3540
3541 /* Create the stack frame. */
3542 RTPTRUNION uStackFrame;
3543 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3544 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3545 if (rcStrict != VINF_SUCCESS)
3546 return rcStrict;
3547 void * const pvStackFrame = uStackFrame.pv;
3548 if (f32BitGate)
3549 {
3550 if (fFlags & IEM_XCPT_FLAGS_ERR)
3551 *uStackFrame.pu32++ = uErr;
3552 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3553 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3554 uStackFrame.pu32[2] = fEfl;
3555 uStackFrame.pu32[3] = pCtx->esp;
3556 uStackFrame.pu32[4] = pCtx->ss.Sel;
3557 if (fEfl & X86_EFL_VM)
3558 {
3559 uStackFrame.pu32[1] = pCtx->cs.Sel;
3560 uStackFrame.pu32[5] = pCtx->es.Sel;
3561 uStackFrame.pu32[6] = pCtx->ds.Sel;
3562 uStackFrame.pu32[7] = pCtx->fs.Sel;
3563 uStackFrame.pu32[8] = pCtx->gs.Sel;
3564 }
3565 }
3566 else
3567 {
3568 if (fFlags & IEM_XCPT_FLAGS_ERR)
3569 *uStackFrame.pu16++ = uErr;
3570 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3571 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3572 uStackFrame.pu16[2] = fEfl;
3573 uStackFrame.pu16[3] = pCtx->sp;
3574 uStackFrame.pu16[4] = pCtx->ss.Sel;
3575 if (fEfl & X86_EFL_VM)
3576 {
3577 uStackFrame.pu16[1] = pCtx->cs.Sel;
3578 uStackFrame.pu16[5] = pCtx->es.Sel;
3579 uStackFrame.pu16[6] = pCtx->ds.Sel;
3580 uStackFrame.pu16[7] = pCtx->fs.Sel;
3581 uStackFrame.pu16[8] = pCtx->gs.Sel;
3582 }
3583 }
3584 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3585 if (rcStrict != VINF_SUCCESS)
3586 return rcStrict;
3587
3588 /* Mark the selectors 'accessed' (hope this is the correct time). */
3589        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3590 * after pushing the stack frame? (Write protect the gdt + stack to
3591 * find out.) */
3592 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3593 {
3594 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3595 if (rcStrict != VINF_SUCCESS)
3596 return rcStrict;
3597 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3598 }
3599
3600 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3601 {
3602 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3603 if (rcStrict != VINF_SUCCESS)
3604 return rcStrict;
3605 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3606 }
3607
3608 /*
3609         * Start committing the register changes (joins with the DPL=CPL branch).
3610 */
3611 pCtx->ss.Sel = NewSS;
3612 pCtx->ss.ValidSel = NewSS;
3613 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3614 pCtx->ss.u32Limit = cbLimitSS;
3615 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3616 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3617 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3618 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3619 * SP is loaded).
3620 * Need to check the other combinations too:
3621 * - 16-bit TSS, 32-bit handler
3622 * - 32-bit TSS, 16-bit handler */
3623 if (!pCtx->ss.Attr.n.u1DefBig)
3624 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
3625 else
3626 pCtx->rsp = uNewEsp - cbStackFrame;
3627
3628 if (fEfl & X86_EFL_VM)
3629 {
3630 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3631 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3632 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3633 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3634 }
3635 }
3636 /*
3637 * Same privilege, no stack change and smaller stack frame.
3638 */
3639 else
3640 {
3641 uint64_t uNewRsp;
3642 RTPTRUNION uStackFrame;
3643 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3644 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3645 if (rcStrict != VINF_SUCCESS)
3646 return rcStrict;
3647 void * const pvStackFrame = uStackFrame.pv;
3648
3649 if (f32BitGate)
3650 {
3651 if (fFlags & IEM_XCPT_FLAGS_ERR)
3652 *uStackFrame.pu32++ = uErr;
3653 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3654 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3655 uStackFrame.pu32[2] = fEfl;
3656 }
3657 else
3658 {
3659 if (fFlags & IEM_XCPT_FLAGS_ERR)
3660 *uStackFrame.pu16++ = uErr;
3661 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3662 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3663 uStackFrame.pu16[2] = fEfl;
3664 }
3665 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3666 if (rcStrict != VINF_SUCCESS)
3667 return rcStrict;
3668
3669 /* Mark the CS selector as 'accessed'. */
3670 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3671 {
3672 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3673 if (rcStrict != VINF_SUCCESS)
3674 return rcStrict;
3675 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3676 }
3677
3678 /*
3679 * Start committing the register changes (joins with the other branch).
3680 */
3681 pCtx->rsp = uNewRsp;
3682 }
3683
3684 /* ... register committing continues. */
3685 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3686 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3687 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3688 pCtx->cs.u32Limit = cbLimitCS;
3689 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3690 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3691
3692 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3693 fEfl &= ~fEflToClear;
3694 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3695
3696 if (fFlags & IEM_XCPT_FLAGS_CR2)
3697 pCtx->cr2 = uCr2;
3698
3699 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3700 iemRaiseXcptAdjustState(pCtx, u8Vector);
3701
3702 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3703}
3704
3705
3706/**
3707 * Implements exceptions and interrupts for long mode.
3708 *
3709 * @returns VBox strict status code.
3710 * @param pIemCpu The IEM per CPU instance data.
3711 * @param pCtx The CPU context.
3712 * @param cbInstr The number of bytes to offset rIP by in the return
3713 * address.
3714 * @param u8Vector The interrupt / exception vector number.
3715 * @param fFlags The flags.
3716 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3717 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3718 */
3719IEM_STATIC VBOXSTRICTRC
3720iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3721 PCPUMCTX pCtx,
3722 uint8_t cbInstr,
3723 uint8_t u8Vector,
3724 uint32_t fFlags,
3725 uint16_t uErr,
3726 uint64_t uCr2)
3727{
3728 /*
3729 * Read the IDT entry.
3730 */
3731 uint16_t offIdt = (uint16_t)u8Vector << 4;
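        /* Long mode IDT entries are 16 bytes each; the descriptor is fetched below as two 8-byte halves. */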
3732 if (pCtx->idtr.cbIdt < offIdt + 7)
3733 {
3734 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3735 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3736 }
3737 X86DESC64 Idte;
3738 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3739 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3740 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3741 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3742 return rcStrict;
3743 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3744 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3745 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3746
3747 /*
3748 * Check the descriptor type, DPL and such.
3749 * ASSUMES this is done in the same order as described for call-gate calls.
3750 */
3751 if (Idte.Gate.u1DescType)
3752 {
3753 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3754 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3755 }
3756 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3757 switch (Idte.Gate.u4Type)
3758 {
3759 case AMD64_SEL_TYPE_SYS_INT_GATE:
3760 fEflToClear |= X86_EFL_IF;
3761 break;
3762 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3763 break;
3764
3765 default:
3766 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3767 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3768 }
3769
3770 /* Check DPL against CPL if applicable. */
3771 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3772 {
3773 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3774 {
3775 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3776 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3777 }
3778 }
3779
3780 /* Is it there? */
3781 if (!Idte.Gate.u1Present)
3782 {
3783 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3784 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3785 }
3786
3787 /* A null CS is bad. */
3788 RTSEL NewCS = Idte.Gate.u16Sel;
3789 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3790 {
3791 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3792 return iemRaiseGeneralProtectionFault0(pIemCpu);
3793 }
3794
3795 /* Fetch the descriptor for the new CS. */
3796 IEMSELDESC DescCS;
3797 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3798 if (rcStrict != VINF_SUCCESS)
3799 {
3800 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3801 return rcStrict;
3802 }
3803
3804 /* Must be a 64-bit code segment. */
3805 if (!DescCS.Long.Gen.u1DescType)
3806 {
3807 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3808 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3809 }
3810 if ( !DescCS.Long.Gen.u1Long
3811 || DescCS.Long.Gen.u1DefBig
3812 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3813 {
3814 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3815 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3816 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3817 }
3818
3819 /* Don't allow lowering the privilege level. For non-conforming CS
3820 selectors, the CS.DPL sets the privilege level the trap/interrupt
3821 handler runs at. For conforming CS selectors, the CPL remains
3822 unchanged, but the CS.DPL must be <= CPL. */
3823 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3824 * when CPU in Ring-0. Result \#GP? */
3825 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3826 {
3827 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3828 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3829 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3830 }
3831
3832
3833 /* Make sure the selector is present. */
3834 if (!DescCS.Legacy.Gen.u1Present)
3835 {
3836 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3837 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3838 }
3839
3840 /* Check that the new RIP is canonical. */
3841 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3842 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3843 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3844 if (!IEM_IS_CANONICAL(uNewRip))
3845 {
3846 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3847 return iemRaiseGeneralProtectionFault0(pIemCpu);
3848 }
3849
3850 /*
3851 * If the privilege level changes or if the IST isn't zero, we need to get
3852 * a new stack from the TSS.
3853 */
3854 uint64_t uNewRsp;
3855 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3856 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3857 if ( uNewCpl != pIemCpu->uCpl
3858 || Idte.Gate.u3IST != 0)
3859 {
3860 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3861 if (rcStrict != VINF_SUCCESS)
3862 return rcStrict;
3863 }
3864 else
3865 uNewRsp = pCtx->rsp;
3866 uNewRsp &= ~(uint64_t)0xf;
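        /* In 64-bit mode the stack pointer is aligned down to a 16-byte boundary before the interrupt frame is pushed. */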
3867
3868 /*
3869 * Calc the flag image to push.
3870 */
3871 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3872 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3873 fEfl &= ~X86_EFL_RF;
3874 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3875 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3876
3877 /*
3878 * Start making changes.
3879 */
3880 /* Set the new CPL so that stack accesses use it. */
3881 uint8_t const uOldCpl = pIemCpu->uCpl;
3882 pIemCpu->uCpl = uNewCpl;
3883
3884 /* Create the stack frame. */
3885 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
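        /* The 64-bit frame is five quadwords (RIP, CS, RFLAGS, RSP, SS), plus one more when an error code is pushed. */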
3886 RTPTRUNION uStackFrame;
3887 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3888 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3889 if (rcStrict != VINF_SUCCESS)
3890 return rcStrict;
3891 void * const pvStackFrame = uStackFrame.pv;
3892
3893 if (fFlags & IEM_XCPT_FLAGS_ERR)
3894 *uStackFrame.pu64++ = uErr;
3895 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3896 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3897 uStackFrame.pu64[2] = fEfl;
3898 uStackFrame.pu64[3] = pCtx->rsp;
3899 uStackFrame.pu64[4] = pCtx->ss.Sel;
3900 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3901 if (rcStrict != VINF_SUCCESS)
3902 return rcStrict;
3903
3904    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3905    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3906 * after pushing the stack frame? (Write protect the gdt + stack to
3907 * find out.) */
3908 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3909 {
3910 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3911 if (rcStrict != VINF_SUCCESS)
3912 return rcStrict;
3913 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3914 }
3915
3916 /*
3917     * Start committing the register changes.
3918 */
3919 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3920 * hidden registers when interrupting 32-bit or 16-bit code! */
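    /* On a privilege change in long mode, SS is loaded with a NULL selector whose RPL equals the new CPL. */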
3921 if (uNewCpl != uOldCpl)
3922 {
3923 pCtx->ss.Sel = 0 | uNewCpl;
3924 pCtx->ss.ValidSel = 0 | uNewCpl;
3925 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3926 pCtx->ss.u32Limit = UINT32_MAX;
3927 pCtx->ss.u64Base = 0;
3928 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3929 }
3930 pCtx->rsp = uNewRsp - cbStackFrame;
3931 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3932 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3933 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3934 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3935 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3936 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3937 pCtx->rip = uNewRip;
3938
3939 fEfl &= ~fEflToClear;
3940 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3941
3942 if (fFlags & IEM_XCPT_FLAGS_CR2)
3943 pCtx->cr2 = uCr2;
3944
3945 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3946 iemRaiseXcptAdjustState(pCtx, u8Vector);
3947
3948 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3949}
3950
3951
3952/**
3953 * Implements exceptions and interrupts.
3954 *
3955 * All exceptions and interrupts go through this function!
3956 *
3957 * @returns VBox strict status code.
3958 * @param pIemCpu The IEM per CPU instance data.
3959 * @param cbInstr The number of bytes to offset rIP by in the return
3960 * address.
3961 * @param u8Vector The interrupt / exception vector number.
3962 * @param fFlags The flags.
3963 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3964 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3965 */
3966DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3967iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3968 uint8_t cbInstr,
3969 uint8_t u8Vector,
3970 uint32_t fFlags,
3971 uint16_t uErr,
3972 uint64_t uCr2)
3973{
3974 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3975#ifdef IN_RING0
3976 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3977 AssertRCReturn(rc, rc);
3978#endif
3979
3980 /*
3981 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3982 */
3983 if ( pCtx->eflags.Bits.u1VM
3984 && pCtx->eflags.Bits.u2IOPL != 3
3985 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3986 && (pCtx->cr0 & X86_CR0_PE) )
3987 {
3988 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3989 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3990 u8Vector = X86_XCPT_GP;
3991 uErr = 0;
3992 }
3993#ifdef DBGFTRACE_ENABLED
3994 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3995 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3996 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3997#endif
3998
3999 /*
4000 * Do recursion accounting.
4001 */
4002 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
4003 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
4004 if (pIemCpu->cXcptRecursions == 0)
4005 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4006 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
4007 else
4008 {
4009 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4010 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
4011
4012        /** @todo double and triple faults. */
4013 if (pIemCpu->cXcptRecursions >= 3)
4014 {
4015#ifdef DEBUG_bird
4016 AssertFailed();
4017#endif
4018 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4019 }
4020
4021 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4022 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4023 {
4024 ....
4025 } */
4026 }
4027 pIemCpu->cXcptRecursions++;
4028 pIemCpu->uCurXcpt = u8Vector;
4029 pIemCpu->fCurXcpt = fFlags;
4030
4031 /*
4032 * Extensive logging.
4033 */
4034#if defined(LOG_ENABLED) && defined(IN_RING3)
4035 if (LogIs3Enabled())
4036 {
4037 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4038 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4039 char szRegs[4096];
4040 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4041 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4042 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4043 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4044 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4045 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4046 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4047 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4048 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4049 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4050 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4051 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4052 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4053 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4054 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4055 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4056 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4057 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4058 " efer=%016VR{efer}\n"
4059 " pat=%016VR{pat}\n"
4060 " sf_mask=%016VR{sf_mask}\n"
4061 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4062 " lstar=%016VR{lstar}\n"
4063 " star=%016VR{star} cstar=%016VR{cstar}\n"
4064 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4065 );
4066
4067 char szInstr[256];
4068 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4069 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4070 szInstr, sizeof(szInstr), NULL);
4071 Log3(("%s%s\n", szRegs, szInstr));
4072 }
4073#endif /* LOG_ENABLED */
4074
4075 /*
4076 * Call the mode specific worker function.
4077 */
4078 VBOXSTRICTRC rcStrict;
4079 if (!(pCtx->cr0 & X86_CR0_PE))
4080 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4081 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4082 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4083 else
4084 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4085
4086 /*
4087 * Unwind.
4088 */
4089 pIemCpu->cXcptRecursions--;
4090 pIemCpu->uCurXcpt = uPrevXcpt;
4091 pIemCpu->fCurXcpt = fPrevXcpt;
4092 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4093 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4094 return rcStrict;
4095}
4096
4097
4098/** \#DE - 00. */
4099DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4100{
4101 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4102}
4103
4104
4105/** \#DB - 01.
4106 * @note This automatically clears DR7.GD. */
4107DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4108{
4109 /** @todo set/clear RF. */
4110 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4111 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4112}
4113
4114
4115/** \#UD - 06. */
4116DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4117{
4118 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4119}
4120
4121
4122/** \#NM - 07. */
4123DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4124{
4125 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4126}
4127
4128
4129/** \#TS(err) - 0a. */
4130DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4131{
4132 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4133}
4134
4135
4136/** \#TS(tr) - 0a. */
4137DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4138{
4139 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4140 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4141}
4142
4143
4144/** \#TS(0) - 0a. */
4145DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4146{
4147 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4148 0, 0);
4149}
4150
4151
4152/** \#TS(sel) - 0a. */
4153DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4154{
4155 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4156 uSel & X86_SEL_MASK_OFF_RPL, 0);
4157}
4158
4159
4160/** \#NP(err) - 0b. */
4161DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4162{
4163 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4164}
4165
4166
4167/** \#NP(seg) - 0b. */
4168DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4169{
4170 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4171 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4172}
4173
4174
4175/** \#NP(sel) - 0b. */
4176DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4177{
4178 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4179 uSel & ~X86_SEL_RPL, 0);
4180}
4181
4182
4183/** \#SS(sel) - 0c. */
4184DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4185{
4186 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4187 uSel & ~X86_SEL_RPL, 0);
4188}
4189
4190
4191/** \#SS(err) - 0c. */
4192DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4193{
4194 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4195}
4196
4197
4198/** \#GP(n) - 0d. */
4199DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4200{
4201 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4202}
4203
4204
4205/** \#GP(0) - 0d. */
4206DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4207{
4208 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4209}
4210
4211
4212/** \#GP(sel) - 0d. */
4213DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4214{
4215 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4216 Sel & ~X86_SEL_RPL, 0);
4217}
4218
4219
4220/** \#GP(0) - 0d. */
4221DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4222{
4223 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4224}
4225
4226
4227/** \#GP(sel) - 0d. */
4228DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4229{
4230 NOREF(iSegReg); NOREF(fAccess);
4231 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4232 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4233}
4234
4235
4236/** \#GP(sel) - 0d. */
4237DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4238{
4239 NOREF(Sel);
4240 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4241}
4242
4243
4244/** \#GP(sel) - 0d. */
4245DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4246{
4247 NOREF(iSegReg); NOREF(fAccess);
4248 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4249}
4250
4251
4252/** \#PF(n) - 0e. */
4253DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4254{
4255 uint16_t uErr;
4256 switch (rc)
4257 {
4258 case VERR_PAGE_NOT_PRESENT:
4259 case VERR_PAGE_TABLE_NOT_PRESENT:
4260 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4261 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4262 uErr = 0;
4263 break;
4264
4265 default:
4266 AssertMsgFailed(("%Rrc\n", rc));
4267 case VERR_ACCESS_DENIED:
4268 uErr = X86_TRAP_PF_P;
4269 break;
4270
4271 /** @todo reserved */
4272 }
4273
4274 if (pIemCpu->uCpl == 3)
4275 uErr |= X86_TRAP_PF_US;
4276
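    /* This code only reports the instruction-fetch (I/D) bit when no-execute paging is enabled (CR4.PAE and EFER.NXE). */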
4277 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4278 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4279 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4280 uErr |= X86_TRAP_PF_ID;
4281
4282#if 0 /* This is so much nonsense, really. Why was it done like that? */
4283 /* Note! RW access callers reporting a WRITE protection fault, will clear
4284 the READ flag before calling. So, read-modify-write accesses (RW)
4285 can safely be reported as READ faults. */
4286 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4287 uErr |= X86_TRAP_PF_RW;
4288#else
4289 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4290 {
4291 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4292 uErr |= X86_TRAP_PF_RW;
4293 }
4294#endif
4295
4296 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4297 uErr, GCPtrWhere);
4298}
4299
4300
4301/** \#MF(0) - 10. */
4302DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4303{
4304 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4305}
4306
4307
4308/** \#AC(0) - 11. */
4309DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4310{
4311 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4312}
4313
4314
4315/**
4316 * Macro for calling iemCImplRaiseDivideError().
4317 *
4318 * This enables us to add/remove arguments and force different levels of
4319 * inlining as we wish.
4320 *
4321 * @return Strict VBox status code.
4322 */
4323#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4324IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4325{
4326 NOREF(cbInstr);
4327 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4328}
4329
4330
4331/**
4332 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4333 *
4334 * This enables us to add/remove arguments and force different levels of
4335 * inlining as we wish.
4336 *
4337 * @return Strict VBox status code.
4338 */
4339#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4340IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4341{
4342 NOREF(cbInstr);
4343 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4344}
4345
4346
4347/**
4348 * Macro for calling iemCImplRaiseInvalidOpcode().
4349 *
4350 * This enables us to add/remove arguments and force different levels of
4351 * inlining as we wish.
4352 *
4353 * @return Strict VBox status code.
4354 */
4355#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4356IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4357{
4358 NOREF(cbInstr);
4359 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4360}
4361
4362
4363/** @} */
4364
4365
4366/*
4367 *
4368 * Helper routines.
4369 * Helper routines.
4370 * Helper routines.
4371 *
4372 */
4373
4374/**
4375 * Recalculates the effective operand size.
4376 *
4377 * @param pIemCpu The IEM state.
4378 */
4379IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4380{
4381 switch (pIemCpu->enmCpuMode)
4382 {
4383 case IEMMODE_16BIT:
4384 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4385 break;
4386 case IEMMODE_32BIT:
4387 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4388 break;
4389 case IEMMODE_64BIT:
4390 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4391 {
4392 case 0:
4393 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4394 break;
4395 case IEM_OP_PRF_SIZE_OP:
4396 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4397 break;
4398 case IEM_OP_PRF_SIZE_REX_W:
4399 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4400 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4401 break;
4402 }
4403 break;
4404 default:
4405 AssertFailed();
4406 }
4407}
4408
4409
4410/**
4411 * Sets the default operand size to 64-bit and recalculates the effective
4412 * operand size.
4413 *
4414 * @param pIemCpu The IEM state.
4415 */
4416IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4417{
4418 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4419 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
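    /* REX.W forces 64-bit; a lone operand-size (66h) prefix selects 16-bit; otherwise the 64-bit default applies. */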
4420 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4421 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4422 else
4423 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4424}
4425
4426
4427/*
4428 *
4429 * Common opcode decoders.
4430 * Common opcode decoders.
4431 * Common opcode decoders.
4432 *
4433 */
4434//#include <iprt/mem.h>
4435
4436/**
4437 * Used to add extra details about a stub case.
4438 * @param pIemCpu The IEM per CPU state.
4439 */
4440IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4441{
4442#if defined(LOG_ENABLED) && defined(IN_RING3)
4443 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4444 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4445 char szRegs[4096];
4446 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4447 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4448 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4449 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4450 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4451 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4452 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4453 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4454 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4455 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4456 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4457 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4458 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4459 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4460 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4461 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4462 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4463 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4464 " efer=%016VR{efer}\n"
4465 " pat=%016VR{pat}\n"
4466 " sf_mask=%016VR{sf_mask}\n"
4467 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4468 " lstar=%016VR{lstar}\n"
4469 " star=%016VR{star} cstar=%016VR{cstar}\n"
4470 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4471 );
4472
4473 char szInstr[256];
4474 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4475 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4476 szInstr, sizeof(szInstr), NULL);
4477
4478 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4479#else
4480    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4481#endif
4482}
4483
4484/**
4485 * Complains about a stub.
4486 *
4487 * Providing two versions of this macro, one for daily use and one for use when
4488 * working on IEM.
4489 */
4490#if 0
4491# define IEMOP_BITCH_ABOUT_STUB() \
4492 do { \
4493 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4494 iemOpStubMsg2(pIemCpu); \
4495 RTAssertPanic(); \
4496 } while (0)
4497#else
4498# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4499#endif
4500
4501/** Stubs an opcode. */
4502#define FNIEMOP_STUB(a_Name) \
4503 FNIEMOP_DEF(a_Name) \
4504 { \
4505 IEMOP_BITCH_ABOUT_STUB(); \
4506 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4507 } \
4508 typedef int ignore_semicolon
4509
4510/** Stubs an opcode. */
4511#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4512 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4513 { \
4514 IEMOP_BITCH_ABOUT_STUB(); \
4515 NOREF(a_Name0); \
4516 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4517 } \
4518 typedef int ignore_semicolon
4519
4520/** Stubs an opcode which currently should raise \#UD. */
4521#define FNIEMOP_UD_STUB(a_Name) \
4522 FNIEMOP_DEF(a_Name) \
4523 { \
4524 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4525 return IEMOP_RAISE_INVALID_OPCODE(); \
4526 } \
4527 typedef int ignore_semicolon
4528
4529/** Stubs an opcode which currently should raise \#UD. */
4530#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4531 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4532 { \
4533 NOREF(a_Name0); \
4534 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4535 return IEMOP_RAISE_INVALID_OPCODE(); \
4536 } \
4537 typedef int ignore_semicolon
4538
4539
4540
4541/** @name Register Access.
4542 * @{
4543 */
4544
4545/**
4546 * Gets a reference (pointer) to the specified hidden segment register.
4547 *
4548 * @returns Hidden register reference.
4549 * @param pIemCpu The per CPU data.
4550 * @param iSegReg The segment register.
4551 */
4552IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4553{
4554 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4555 PCPUMSELREG pSReg;
4556 switch (iSegReg)
4557 {
4558 case X86_SREG_ES: pSReg = &pCtx->es; break;
4559 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4560 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4561 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4562 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4563 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4564 default:
4565 AssertFailedReturn(NULL);
4566 }
4567#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4568 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4569 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4570#else
4571 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4572#endif
4573 return pSReg;
4574}
4575
4576
4577/**
4578 * Ensures that the given hidden segment register is up to date.
4579 *
4580 * @returns Hidden register reference.
4581 * @param pIemCpu The per CPU data.
4582 * @param pSReg The segment register.
4583 */
4584IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
4585{
4586#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4587 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4588 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4589#else
4590 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4591 NOREF(pIemCpu);
4592#endif
4593 return pSReg;
4594}
4595
4596
4597/**
4598 * Gets a reference (pointer) to the specified segment register (the selector
4599 * value).
4600 *
4601 * @returns Pointer to the selector variable.
4602 * @param pIemCpu The per CPU data.
4603 * @param iSegReg The segment register.
4604 */
4605IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4606{
4607 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4608 switch (iSegReg)
4609 {
4610 case X86_SREG_ES: return &pCtx->es.Sel;
4611 case X86_SREG_CS: return &pCtx->cs.Sel;
4612 case X86_SREG_SS: return &pCtx->ss.Sel;
4613 case X86_SREG_DS: return &pCtx->ds.Sel;
4614 case X86_SREG_FS: return &pCtx->fs.Sel;
4615 case X86_SREG_GS: return &pCtx->gs.Sel;
4616 }
4617 AssertFailedReturn(NULL);
4618}
4619
4620
4621/**
4622 * Fetches the selector value of a segment register.
4623 *
4624 * @returns The selector value.
4625 * @param pIemCpu The per CPU data.
4626 * @param iSegReg The segment register.
4627 */
4628IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4629{
4630 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4631 switch (iSegReg)
4632 {
4633 case X86_SREG_ES: return pCtx->es.Sel;
4634 case X86_SREG_CS: return pCtx->cs.Sel;
4635 case X86_SREG_SS: return pCtx->ss.Sel;
4636 case X86_SREG_DS: return pCtx->ds.Sel;
4637 case X86_SREG_FS: return pCtx->fs.Sel;
4638 case X86_SREG_GS: return pCtx->gs.Sel;
4639 }
4640 AssertFailedReturn(0xffff);
4641}
4642
4643
4644/**
4645 * Gets a reference (pointer) to the specified general register.
4646 *
4647 * @returns Register reference.
4648 * @param pIemCpu The per CPU data.
4649 * @param iReg The general register.
4650 */
4651IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4652{
4653 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4654 switch (iReg)
4655 {
4656 case X86_GREG_xAX: return &pCtx->rax;
4657 case X86_GREG_xCX: return &pCtx->rcx;
4658 case X86_GREG_xDX: return &pCtx->rdx;
4659 case X86_GREG_xBX: return &pCtx->rbx;
4660 case X86_GREG_xSP: return &pCtx->rsp;
4661 case X86_GREG_xBP: return &pCtx->rbp;
4662 case X86_GREG_xSI: return &pCtx->rsi;
4663 case X86_GREG_xDI: return &pCtx->rdi;
4664 case X86_GREG_x8: return &pCtx->r8;
4665 case X86_GREG_x9: return &pCtx->r9;
4666 case X86_GREG_x10: return &pCtx->r10;
4667 case X86_GREG_x11: return &pCtx->r11;
4668 case X86_GREG_x12: return &pCtx->r12;
4669 case X86_GREG_x13: return &pCtx->r13;
4670 case X86_GREG_x14: return &pCtx->r14;
4671 case X86_GREG_x15: return &pCtx->r15;
4672 }
4673 AssertFailedReturn(NULL);
4674}
4675
4676
4677/**
4678 * Gets a reference (pointer) to the specified 8-bit general register.
4679 *
4680 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4681 *
4682 * @returns Register reference.
4683 * @param pIemCpu The per CPU data.
4684 * @param iReg The register.
4685 */
4686IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4687{
4688 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4689 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4690
4691 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4692 if (iReg >= 4)
4693 pu8Reg++;
4694 return pu8Reg;
4695}
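
/*
 * Worked example (illustrative sketch, not part of the interpreter proper):
 * the (iReg & 3) plus one-byte-offset trick above encodes the legacy 8-bit
 * register set. Without a REX prefix, encodings 0..3 select AL/CL/DL/BL
 * (byte 0 of rAX/rCX/rDX/rBX) and 4..7 select AH/CH/DH/BH (byte 1 of the
 * same GPRs); with any REX prefix, 4..7 select SPL/BPL/SIL/DIL instead and
 * iemGRegRef can be used directly. The byte addressing assumes the
 * little-endian host layout of the GPR fields that the code above already
 * relies on.
 */
#if 0 /* Stand-alone sketch of the aliasing, in plain C. */
# include <stdint.h>
# include <assert.h>
static void iemGRegRefU8Sketch(void)
{
    uint64_t uRax = UINT64_C(0x1122334455667788);
    uint8_t *pbAl = (uint8_t *)&uRax;       /* encoding 0 (AL)          -> 0x88 */
    uint8_t *pbAh = (uint8_t *)&uRax + 1;   /* encoding 4, no REX (AH)  -> 0x77 */
    assert(*pbAl == 0x88);
    assert(*pbAh == 0x77);
}
#endif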
4696
4697
4698/**
4699 * Fetches the value of an 8-bit general register.
4700 *
4701 * @returns The register value.
4702 * @param pIemCpu The per CPU data.
4703 * @param iReg The register.
4704 */
4705IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4706{
4707 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4708 return *pbSrc;
4709}
4710
4711
4712/**
4713 * Fetches the value of a 16-bit general register.
4714 *
4715 * @returns The register value.
4716 * @param pIemCpu The per CPU data.
4717 * @param iReg The register.
4718 */
4719IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4720{
4721 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4722}
4723
4724
4725/**
4726 * Fetches the value of a 32-bit general register.
4727 *
4728 * @returns The register value.
4729 * @param pIemCpu The per CPU data.
4730 * @param iReg The register.
4731 */
4732IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4733{
4734 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4735}
4736
4737
4738/**
4739 * Fetches the value of a 64-bit general register.
4740 *
4741 * @returns The register value.
4742 * @param pIemCpu The per CPU data.
4743 * @param iReg The register.
4744 */
4745IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4746{
4747 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4748}
4749
4750
4751/**
4752 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4753 *
4754 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4755 * segment limit.
4756 * @returns Strict VBox status code.
4757 * @param pIemCpu The per CPU data.
4758 * @param offNextInstr The offset of the next instruction.
4759 */
4760IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4761{
4762 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4763 switch (pIemCpu->enmEffOpSize)
4764 {
4765 case IEMMODE_16BIT:
4766 {
4767 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4768 if ( uNewIp > pCtx->cs.u32Limit
4769 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4770 return iemRaiseGeneralProtectionFault0(pIemCpu);
4771 pCtx->rip = uNewIp;
4772 break;
4773 }
4774
4775 case IEMMODE_32BIT:
4776 {
4777 Assert(pCtx->rip <= UINT32_MAX);
4778 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4779
4780 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4781 if (uNewEip > pCtx->cs.u32Limit)
4782 return iemRaiseGeneralProtectionFault0(pIemCpu);
4783 pCtx->rip = uNewEip;
4784 break;
4785 }
4786
4787 case IEMMODE_64BIT:
4788 {
4789 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4790
4791 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4792 if (!IEM_IS_CANONICAL(uNewRip))
4793 return iemRaiseGeneralProtectionFault0(pIemCpu);
4794 pCtx->rip = uNewRip;
4795 break;
4796 }
4797
4798 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4799 }
4800
4801 pCtx->eflags.Bits.u1RF = 0;
4802 return VINF_SUCCESS;
4803}
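
/*
 * Worked example for the 16-bit case above (hypothetical values): with
 * IP=0xFFF0, an instruction length (offOpcode) of 2 and offNextInstr=+0x20,
 * uNewIp is computed in 16-bit arithmetic and wraps to 0x0012; the result is
 * then checked against the CS limit unless the CPU is in 64-bit mode, where
 * a 16-bit IP is always canonical and no check is needed.
 */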
4804
4805
4806/**
4807 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4808 *
4809 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4810 * segment limit.
4811 *
4812 * @returns Strict VBox status code.
4813 * @param pIemCpu The per CPU data.
4814 * @param offNextInstr The offset of the next instruction.
4815 */
4816IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4817{
4818 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4819 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4820
4821 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4822 if ( uNewIp > pCtx->cs.u32Limit
4823 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4824 return iemRaiseGeneralProtectionFault0(pIemCpu);
4825 /** @todo Test 16-bit jump in 64-bit mode. Is it possible? */
4826 pCtx->rip = uNewIp;
4827 pCtx->eflags.Bits.u1RF = 0;
4828
4829 return VINF_SUCCESS;
4830}
4831
4832
4833/**
4834 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4835 *
4836 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4837 * segment limit.
4838 *
4839 * @returns Strict VBox status code.
4840 * @param pIemCpu The per CPU data.
4841 * @param offNextInstr The offset of the next instruction.
4842 */
4843IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4844{
4845 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4846 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4847
4848 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4849 {
4850 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4851
4852 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4853 if (uNewEip > pCtx->cs.u32Limit)
4854 return iemRaiseGeneralProtectionFault0(pIemCpu);
4855 pCtx->rip = uNewEip;
4856 }
4857 else
4858 {
4859 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4860
4861 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4862 if (!IEM_IS_CANONICAL(uNewRip))
4863 return iemRaiseGeneralProtectionFault0(pIemCpu);
4864 pCtx->rip = uNewRip;
4865 }
4866 pCtx->eflags.Bits.u1RF = 0;
4867 return VINF_SUCCESS;
4868}
4869
4870
4871/**
4872 * Performs a near jump to the specified address.
4873 *
4874 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4875 * segment limit.
4876 * @returns Strict VBox status code.
4877 * @param pIemCpu The per CPU data.
4878 * @param uNewRip The new RIP value.
4879 */
4880IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4881{
4882 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4883 switch (pIemCpu->enmEffOpSize)
4884 {
4885 case IEMMODE_16BIT:
4886 {
4887 Assert(uNewRip <= UINT16_MAX);
4888 if ( uNewRip > pCtx->cs.u32Limit
4889 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4890 return iemRaiseGeneralProtectionFault0(pIemCpu);
4891 /** @todo Test 16-bit jump in 64-bit mode. */
4892 pCtx->rip = uNewRip;
4893 break;
4894 }
4895
4896 case IEMMODE_32BIT:
4897 {
4898 Assert(uNewRip <= UINT32_MAX);
4899 Assert(pCtx->rip <= UINT32_MAX);
4900 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4901
4902 if (uNewRip > pCtx->cs.u32Limit)
4903 return iemRaiseGeneralProtectionFault0(pIemCpu);
4904 pCtx->rip = uNewRip;
4905 break;
4906 }
4907
4908 case IEMMODE_64BIT:
4909 {
4910 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4911
4912 if (!IEM_IS_CANONICAL(uNewRip))
4913 return iemRaiseGeneralProtectionFault0(pIemCpu);
4914 pCtx->rip = uNewRip;
4915 break;
4916 }
4917
4918 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4919 }
4920
4921 pCtx->eflags.Bits.u1RF = 0;
4922 return VINF_SUCCESS;
4923}
4924
4925
4926/**
4927 * Gets the address of the top of the stack.
4928 *
4929 * @returns The current top of stack address.
4930 * @param pIemCpu The per CPU data.
4931 * @param pCtx The CPU context from which SP/ESP/RSP is read.
4932 */
4933DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4934{
4935 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4936 return pCtx->rsp;
4937 if (pCtx->ss.Attr.n.u1DefBig)
4938 return pCtx->esp;
4939 return pCtx->sp;
4940}
4941
4942
4943/**
4944 * Updates the RIP/EIP/IP to point to the next instruction.
4945 *
4946 * This function leaves the EFLAGS.RF flag alone.
4947 *
4948 * @param pIemCpu The per CPU data.
4949 * @param cbInstr The number of bytes to add.
4950 */
4951IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4952{
4953 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4954 switch (pIemCpu->enmCpuMode)
4955 {
4956 case IEMMODE_16BIT:
4957 Assert(pCtx->rip <= UINT16_MAX);
4958 pCtx->eip += cbInstr;
4959 pCtx->eip &= UINT32_C(0xffff);
4960 break;
4961
4962 case IEMMODE_32BIT:
4963 pCtx->eip += cbInstr;
4964 Assert(pCtx->rip <= UINT32_MAX);
4965 break;
4966
4967 case IEMMODE_64BIT:
4968 pCtx->rip += cbInstr;
4969 break;
4970 default: AssertFailed();
4971 }
4972}
4973
4974
4975#if 0
4976/**
4977 * Updates the RIP/EIP/IP to point to the next instruction.
4978 *
4979 * @param pIemCpu The per CPU data.
4980 */
4981IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4982{
4983 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4984}
4985#endif
4986
4987
4988
4989/**
4990 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4991 *
4992 * @param pIemCpu The per CPU data.
4993 * @param cbInstr The number of bytes to add.
4994 */
4995IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4996{
4997 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4998
4999 pCtx->eflags.Bits.u1RF = 0;
5000
5001 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
5002 switch (pIemCpu->enmCpuMode)
5003 {
5004 /** @todo investigate if EIP or RIP is really incremented. */
5005 case IEMMODE_16BIT:
5006 case IEMMODE_32BIT:
5007 pCtx->eip += cbInstr;
5008 Assert(pCtx->rip <= UINT32_MAX);
5009 break;
5010
5011 case IEMMODE_64BIT:
5012 pCtx->rip += cbInstr;
5013 break;
5014 default: AssertFailed();
5015 }
5016}
5017
5018
5019/**
5020 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
5021 *
5022 * @param pIemCpu The per CPU data.
5023 */
5024IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
5025{
5026 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
5027}
5028
5029
5030/**
5031 * Adds to the stack pointer.
5032 *
5033 * @param pIemCpu The per CPU data.
5034 * @param pCtx The CPU context in which SP/ESP/RSP should be
5035 * updated.
5036 * @param cbToAdd The number of bytes to add (8-bit!).
5037 */
5038DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
5039{
5040 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5041 pCtx->rsp += cbToAdd;
5042 else if (pCtx->ss.Attr.n.u1DefBig)
5043 pCtx->esp += cbToAdd;
5044 else
5045 pCtx->sp += cbToAdd;
5046}
5047
5048
5049/**
5050 * Subtracts from the stack pointer.
5051 *
5052 * @param pIemCpu The per CPU data.
5053 * @param pCtx The CPU context in which SP/ESP/RSP should be
5054 * updated.
5055 * @param cbToSub The number of bytes to subtract (8-bit!).
5056 */
5057DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
5058{
5059 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5060 pCtx->rsp -= cbToSub;
5061 else if (pCtx->ss.Attr.n.u1DefBig)
5062 pCtx->esp -= cbToSub;
5063 else
5064 pCtx->sp -= cbToSub;
5065}
5066
5067
5068/**
5069 * Adds to the temporary stack pointer.
5070 *
5071 * @param pIemCpu The per CPU data.
5072 * @param pCtx Where to get the current stack mode.
5073 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5074 * @param cbToAdd The number of bytes to add (16-bit).
5075 */
5076DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
5077{
5078 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5079 pTmpRsp->u += cbToAdd;
5080 else if (pCtx->ss.Attr.n.u1DefBig)
5081 pTmpRsp->DWords.dw0 += cbToAdd;
5082 else
5083 pTmpRsp->Words.w0 += cbToAdd;
5084}
5085
5086
5087/**
5088 * Subtracts from the temporary stack pointer.
5089 *
5090 * @param pIemCpu The per CPU data.
5091 * @param pCtx Where to get the current stack mode.
5092 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5093 * @param cbToSub The number of bytes to subtract.
5094 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5095 * expecting that.
5096 */
5097DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5098{
5099 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5100 pTmpRsp->u -= cbToSub;
5101 else if (pCtx->ss.Attr.n.u1DefBig)
5102 pTmpRsp->DWords.dw0 -= cbToSub;
5103 else
5104 pTmpRsp->Words.w0 -= cbToSub;
5105}
5106
5107
5108/**
5109 * Calculates the effective stack address for a push of the specified size as
5110 * well as the new RSP value (upper bits may be masked).
5111 *
5112 * @returns Effective stack address for the push.
5113 * @param pIemCpu The IEM per CPU data.
5114 * @param pCtx Where to get the current stack mode.
5115 * @param cbItem The size of the stack item to push.
5116 * @param puNewRsp Where to return the new RSP value.
5117 */
5118DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5119{
5120 RTUINT64U uTmpRsp;
5121 RTGCPTR GCPtrTop;
5122 uTmpRsp.u = pCtx->rsp;
5123
5124 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5125 GCPtrTop = uTmpRsp.u -= cbItem;
5126 else if (pCtx->ss.Attr.n.u1DefBig)
5127 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5128 else
5129 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5130 *puNewRsp = uTmpRsp.u;
5131 return GCPtrTop;
5132}
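
/*
 * Worked example (hypothetical values): with a 32-bit stack (SS.D=1) and
 * RSP=0x00001000, pushing a 4 byte item returns GCPtrTop=0x00000FFC and
 * *puNewRsp=0x00000FFC, leaving any bits above ESP untouched. With a 16-bit
 * stack and RSP=0x00010002, the same push only decrements SP, returning
 * GCPtrTop=0xFFFE and *puNewRsp=0x0001FFFE.
 */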
5133
5134
5135/**
5136 * Gets the current stack pointer and calculates the value after a pop of the
5137 * specified size.
5138 *
5139 * @returns Current stack pointer.
5140 * @param pIemCpu The per CPU data.
5141 * @param pCtx Where to get the current stack mode.
5142 * @param cbItem The size of the stack item to pop.
5143 * @param puNewRsp Where to return the new RSP value.
5144 */
5145DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5146{
5147 RTUINT64U uTmpRsp;
5148 RTGCPTR GCPtrTop;
5149 uTmpRsp.u = pCtx->rsp;
5150
5151 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5152 {
5153 GCPtrTop = uTmpRsp.u;
5154 uTmpRsp.u += cbItem;
5155 }
5156 else if (pCtx->ss.Attr.n.u1DefBig)
5157 {
5158 GCPtrTop = uTmpRsp.DWords.dw0;
5159 uTmpRsp.DWords.dw0 += cbItem;
5160 }
5161 else
5162 {
5163 GCPtrTop = uTmpRsp.Words.w0;
5164 uTmpRsp.Words.w0 += cbItem;
5165 }
5166 *puNewRsp = uTmpRsp.u;
5167 return GCPtrTop;
5168}
5169
5170
5171/**
5172 * Calculates the effective stack address for a push of the specified size as
5173 * well as the new temporary RSP value (upper bits may be masked).
5174 *
5175 * @returns Effective stack address for the push.
5176 * @param pIemCpu The per CPU data.
5177 * @param pCtx Where to get the current stack mode.
5178 * @param pTmpRsp The temporary stack pointer. This is updated.
5179 * @param cbItem The size of the stack item to push.
5180 */
5181DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5182{
5183 RTGCPTR GCPtrTop;
5184
5185 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5186 GCPtrTop = pTmpRsp->u -= cbItem;
5187 else if (pCtx->ss.Attr.n.u1DefBig)
5188 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5189 else
5190 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5191 return GCPtrTop;
5192}
5193
5194
5195/**
5196 * Gets the effective stack address for a pop of the specified size and
5197 * calculates and updates the temporary RSP.
5198 *
5199 * @returns Current stack pointer.
5200 * @param pIemCpu The per CPU data.
5201 * @param pCtx Where to get the current stack mode.
5202 * @param pTmpRsp The temporary stack pointer. This is updated.
5203 * @param cbItem The size of the stack item to pop.
5204 */
5205DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5206{
5207 RTGCPTR GCPtrTop;
5208 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5209 {
5210 GCPtrTop = pTmpRsp->u;
5211 pTmpRsp->u += cbItem;
5212 }
5213 else if (pCtx->ss.Attr.n.u1DefBig)
5214 {
5215 GCPtrTop = pTmpRsp->DWords.dw0;
5216 pTmpRsp->DWords.dw0 += cbItem;
5217 }
5218 else
5219 {
5220 GCPtrTop = pTmpRsp->Words.w0;
5221 pTmpRsp->Words.w0 += cbItem;
5222 }
5223 return GCPtrTop;
5224}
5225
5226/** @} */
5227
5228
5229/** @name FPU access and helpers.
5230 *
5231 * @{
5232 */
5233
5234
5235/**
5236 * Hook for preparing to use the host FPU.
5237 *
5238 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5239 *
5240 * @param pIemCpu The IEM per CPU data.
5241 */
5242DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5243{
5244#ifdef IN_RING3
5245 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
5246#else
5247 CPUMRZFpuStatePrepareHostCpuForUse(IEMCPU_TO_VMCPU(pIemCpu));
5248#endif
5249}
5250
5251
5252/**
5253 * Hook for preparing to use the host FPU for SSE.
5254 *
5255 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5256 *
5257 * @param pIemCpu The IEM per CPU data.
5258 */
5259DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5260{
5261 iemFpuPrepareUsage(pIemCpu);
5262}
5263
5264
5265/**
5266 * Hook for actualizing the guest FPU state before the interpreter reads it.
5267 *
5268 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5269 *
5270 * @param pIemCpu The IEM per CPU data.
5271 */
5272DECLINLINE(void) iemFpuActualizeStateForRead(PIEMCPU pIemCpu)
5273{
5274#ifdef IN_RING3
5275 NOREF(pIemCpu);
5276#else
5277 CPUMRZFpuStateActualizeForRead(IEMCPU_TO_VMCPU(pIemCpu));
5278#endif
5279}
5280
5281
5282/**
5283 * Hook for actualizing the guest FPU state before the interpreter changes it.
5284 *
5285 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5286 *
5287 * @param pIemCpu The IEM per CPU data.
5288 */
5289DECLINLINE(void) iemFpuActualizeStateForChange(PIEMCPU pIemCpu)
5290{
5291#ifdef IN_RING3
5292 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
5293#else
5294 CPUMRZFpuStateActualizeForChange(IEMCPU_TO_VMCPU(pIemCpu));
5295#endif
5296}
5297
5298
5299/**
5300 * Hook for actualizing the guest XMM0..15 register state for read only.
5301 *
5302 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5303 *
5304 * @param pIemCpu The IEM per CPU data.
5305 */
5306DECLINLINE(void) iemFpuActualizeSseStateForRead(PIEMCPU pIemCpu)
5307{
5308#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
5309 NOREF(pIemCpu);
5310#else
5311 CPUMRZFpuStateActualizeSseForRead(IEMCPU_TO_VMCPU(pIemCpu));
5312#endif
5313}
5314
5315
5316/**
5317 * Hook for actualizing the guest XMM0..15 register state for read+write.
5318 *
5319 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5320 *
5321 * @param pIemCpu The IEM per CPU data.
5322 */
5323DECLINLINE(void) iemFpuActualizeSseStateForChange(PIEMCPU pIemCpu)
5324{
5325#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
5326 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
5327#else
5328 CPUMRZFpuStateActualizeForChange(IEMCPU_TO_VMCPU(pIemCpu));
5329#endif
5330}
5331
5332
5333/**
5334 * Stores a QNaN value into a FPU register.
5335 *
5336 * @param pReg Pointer to the register.
5337 */
5338DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5339{
5340 pReg->au32[0] = UINT32_C(0x00000000);
5341 pReg->au32[1] = UINT32_C(0xc0000000);
5342 pReg->au16[4] = UINT16_C(0xffff);
5343}
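
/*
 * The three stores above assemble the 80-bit pattern ffff'c000'0000'0000'0000:
 * sign=1, exponent=0x7fff and mantissa=0xc000000000000000, i.e. the x87
 * "real indefinite" QNaN produced by masked invalid-operation responses.
 */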
5344
5345
5346/**
5347 * Updates the FOP, FPU.CS and FPUIP registers.
5348 *
5349 * @param pIemCpu The IEM per CPU data.
5350 * @param pCtx The CPU context.
5351 * @param pFpuCtx The FPU context.
5352 */
5353DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5354{
5355 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5356 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5357 /** @todo x87.CS and FPUIP need to be kept separately. */
5358 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5359 {
5360 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
5361 * are handled in real mode, based on the fnsave and fnstenv images. */
5362 pFpuCtx->CS = 0;
5363 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5364 }
5365 else
5366 {
5367 pFpuCtx->CS = pCtx->cs.Sel;
5368 pFpuCtx->FPUIP = pCtx->rip;
5369 }
5370}
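
/*
 * Worked example (hypothetical bytes): for the instruction d9 05 (FLD m32),
 * the FOP assembled above is 0x05 | ((0xd9 & 7) << 8) = 0x0105. In real and
 * V8086 mode the FPUIP stored above becomes eip | (cs << 4); e.g. with
 * CS=0x1234 and EIP=0x0010 that yields 0x12350.
 */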
5371
5372
5373/**
5374 * Updates the x87.DS and FPUDP registers.
5375 *
5376 * @param pIemCpu The IEM per CPU data.
5377 * @param pCtx The CPU context.
5378 * @param pFpuCtx The FPU context.
5379 * @param iEffSeg The effective segment register.
5380 * @param GCPtrEff The effective address relative to @a iEffSeg.
5381 */
5382DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5383{
5384 RTSEL sel;
5385 switch (iEffSeg)
5386 {
5387 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5388 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5389 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5390 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5391 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5392 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5393 default:
5394 AssertMsgFailed(("%d\n", iEffSeg));
5395 sel = pCtx->ds.Sel;
5396 }
5397 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5398 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5399 {
5400 pFpuCtx->DS = 0;
5401 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
5402 }
5403 else
5404 {
5405 pFpuCtx->DS = sel;
5406 pFpuCtx->FPUDP = GCPtrEff;
5407 }
5408}
5409
5410
5411/**
5412 * Rotates the stack registers in the push direction.
5413 *
5414 * @param pFpuCtx The FPU context.
5415 * @remarks This is a complete waste of time, but fxsave stores the registers in
5416 * stack order.
5417 */
5418DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5419{
5420 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5421 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5422 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5423 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5424 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5425 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5426 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5427 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5428 pFpuCtx->aRegs[0].r80 = r80Tmp;
5429}
5430
5431
5432/**
5433 * Rotates the stack registers in the pop direction.
5434 *
5435 * @param pFpuCtx The FPU context.
5436 * @remarks This is a complete waste of time, but fxsave stores the registers in
5437 * stack order.
5438 */
5439DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5440{
5441 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5442 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5443 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5444 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5445 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5446 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5447 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5448 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5449 pFpuCtx->aRegs[7].r80 = r80Tmp;
5450}
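
/*
 * Note on the two rotation helpers above: IEM keeps aRegs[] in ST() order,
 * i.e. aRegs[0] is always ST(0), matching the fxsave/fxrstor register layout,
 * whereas real hardware merely moves FSW.TOP. Hence every TOP change (push
 * or pop) is accompanied by a one-step rotation of the whole array so that
 * the aRegs[] indexing stays consistent.
 */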
5451
5452
5453/**
5454 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5455 * exception prevents it.
5456 *
5457 * @param pIemCpu The IEM per CPU data.
5458 * @param pResult The FPU operation result to push.
5459 * @param pFpuCtx The FPU context.
5460 */
5461IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5462{
5463 /* Update FSW and bail if there are pending exceptions afterwards. */
5464 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5465 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5466 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5467 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5468 {
5469 pFpuCtx->FSW = fFsw;
5470 return;
5471 }
5472
5473 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5474 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5475 {
5476 /* All is fine, push the actual value. */
5477 pFpuCtx->FTW |= RT_BIT(iNewTop);
5478 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5479 }
5480 else if (pFpuCtx->FCW & X86_FCW_IM)
5481 {
5482 /* Masked stack overflow, push QNaN. */
5483 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5484 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5485 }
5486 else
5487 {
5488 /* Raise stack overflow, don't push anything. */
5489 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5490 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5491 return;
5492 }
5493
5494 fFsw &= ~X86_FSW_TOP_MASK;
5495 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5496 pFpuCtx->FSW = fFsw;
5497
5498 iemFpuRotateStackPush(pFpuCtx);
5499}
5500
5501
5502/**
5503 * Stores a result in a FPU register and updates the FSW and FTW.
5504 *
5505 * @param pFpuCtx The FPU context.
5506 * @param pResult The result to store.
5507 * @param iStReg Which FPU register to store it in.
5508 */
5509IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5510{
5511 Assert(iStReg < 8);
5512 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5513 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5514 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5515 pFpuCtx->FTW |= RT_BIT(iReg);
5516 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5517}
5518
5519
5520/**
5521 * Only updates the FPU status word (FSW) with the result of the current
5522 * instruction.
5523 *
5524 * @param pFpuCtx The FPU context.
5525 * @param u16FSW The FSW output of the current instruction.
5526 */
5527IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5528{
5529 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5530 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5531}
5532
5533
5534/**
5535 * Pops one item off the FPU stack if no pending exception prevents it.
5536 *
5537 * @param pFpuCtx The FPU context.
5538 */
5539IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5540{
5541 /* Check pending exceptions. */
5542 uint16_t uFSW = pFpuCtx->FSW;
5543 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5544 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5545 return;
5546
5547 /* TOP--. */
5548 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5549 uFSW &= ~X86_FSW_TOP_MASK;
5550 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5551 pFpuCtx->FSW = uFSW;
5552
5553 /* Mark the previous ST0 as empty. */
5554 iOldTop >>= X86_FSW_TOP_SHIFT;
5555 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5556
5557 /* Rotate the registers. */
5558 iemFpuRotateStackPop(pFpuCtx);
5559}
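
/*
 * Note: the "+ 9 << X86_FSW_TOP_SHIFT" above works because the subsequent
 * masking keeps only the 3-bit TOP field, so adding 9 is equivalent to
 * adding 1 modulo 8 - TOP is simply incremented, which is what a pop does.
 */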
5560
5561
5562/**
5563 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5564 *
5565 * @param pIemCpu The IEM per CPU data.
5566 * @param pResult The FPU operation result to push.
5567 */
5568IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5569{
5570 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5571 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5572 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5573 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5574}
5575
5576
5577/**
5578 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5579 * and sets FPUDP and FPUDS.
5580 *
5581 * @param pIemCpu The IEM per CPU data.
5582 * @param pResult The FPU operation result to push.
5583 * @param iEffSeg The effective segment register.
5584 * @param GCPtrEff The effective address relative to @a iEffSeg.
5585 */
5586IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5587{
5588 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5589 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5590 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5591 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5592 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5593}
5594
5595
5596/**
5597 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5598 * unless a pending exception prevents it.
5599 *
5600 * @param pIemCpu The IEM per CPU data.
5601 * @param pResult The FPU operation result to store and push.
5602 */
5603IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5604{
5605 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5606 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5607 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5608
5609 /* Update FSW and bail if there are pending exceptions afterwards. */
5610 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5611 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5612 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5613 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5614 {
5615 pFpuCtx->FSW = fFsw;
5616 return;
5617 }
5618
5619 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5620 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5621 {
5622 /* All is fine, push the actual value. */
5623 pFpuCtx->FTW |= RT_BIT(iNewTop);
5624 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5625 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5626 }
5627 else if (pFpuCtx->FCW & X86_FCW_IM)
5628 {
5629 /* Masked stack overflow, push QNaN. */
5630 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5631 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5632 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5633 }
5634 else
5635 {
5636 /* Raise stack overflow, don't push anything. */
5637 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5638 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5639 return;
5640 }
5641
5642 fFsw &= ~X86_FSW_TOP_MASK;
5643 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5644 pFpuCtx->FSW = fFsw;
5645
5646 iemFpuRotateStackPush(pFpuCtx);
5647}
5648
5649
5650/**
5651 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5652 * FOP.
5653 *
5654 * @param pIemCpu The IEM per CPU data.
5655 * @param pResult The result to store.
5656 * @param iStReg Which FPU register to store it in.
5657 */
5658IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5659{
5660 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5661 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5662 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5663 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5664}
5665
5666
5667/**
5668 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5669 * FOP, and then pops the stack.
5670 *
5671 * @param pIemCpu The IEM per CPU data.
5672 * @param pResult The result to store.
5673 * @param iStReg Which FPU register to store it in.
5674 */
5675IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5676{
5677 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5678 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5679 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5680 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5681 iemFpuMaybePopOne(pFpuCtx);
5682}
5683
5684
5685/**
5686 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5687 * FPUDP, and FPUDS.
5688 *
5689 * @param pIemCpu The IEM per CPU data.
5690 * @param pResult The result to store.
5691 * @param iStReg Which FPU register to store it in.
5692 * @param iEffSeg The effective memory operand selector register.
5693 * @param GCPtrEff The effective memory operand offset.
5694 */
5695IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5696 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5697{
5698 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5699 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5700 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5701 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5702 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5703}
5704
5705
5706/**
5707 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5708 * FPUDP, and FPUDS, and then pops the stack.
5709 *
5710 * @param pIemCpu The IEM per CPU data.
5711 * @param pResult The result to store.
5712 * @param iStReg Which FPU register to store it in.
5713 * @param iEffSeg The effective memory operand selector register.
5714 * @param GCPtrEff The effective memory operand offset.
5715 */
5716IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5717 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5718{
5719 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5720 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5721 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5722 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5723 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5724 iemFpuMaybePopOne(pFpuCtx);
5725}
5726
5727
5728/**
5729 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5730 *
5731 * @param pIemCpu The IEM per CPU data.
5732 */
5733IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5734{
5735 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5736 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5737 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5738}
5739
5740
5741/**
5742 * Marks the specified stack register as free (for FFREE).
5743 *
5744 * @param pIemCpu The IEM per CPU data.
5745 * @param iStReg The register to free.
5746 */
5747IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5748{
5749 Assert(iStReg < 8);
5750 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5751 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5752 pFpuCtx->FTW &= ~RT_BIT(iReg);
5753}
5754
5755
5756/**
5757 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5758 *
5759 * @param pIemCpu The IEM per CPU data.
5760 */
5761IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5762{
5763 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5764 uint16_t uFsw = pFpuCtx->FSW;
5765 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5766 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5767 uFsw &= ~X86_FSW_TOP_MASK;
5768 uFsw |= uTop;
5769 pFpuCtx->FSW = uFsw;
5770}
5771
5772
5773/**
5774 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5775 *
5776 * @param pIemCpu The IEM per CPU data.
5777 */
5778IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5779{
5780 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5781 uint16_t uFsw = pFpuCtx->FSW;
5782 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5783 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5784 uFsw &= ~X86_FSW_TOP_MASK;
5785 uFsw |= uTop;
5786 pFpuCtx->FSW = uFsw;
5787}
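
/*
 * The increment/decrement helpers above do all TOP arithmetic modulo 8 inside
 * the shifted FSW field; the stand-alone sketch below (plain C, not VBox code)
 * shows the same identities on unshifted values.
 */
#if 0
# include <stdint.h>
# include <assert.h>
static void iemFpuTopArithmeticSketch(void)
{
    for (uint16_t iTop = 0; iTop < 8; iTop++)
    {
        uint16_t iAfterPush = (iTop + 7) & 7;  /* decrement: TOP - 1 (mod 8) */
        uint16_t iAfterPop  = (iTop + 1) & 7;  /* increment: TOP + 1 (mod 8) */
        assert(iAfterPush == (iTop + 8 - 1) % 8);
        assert(iAfterPop  == (iTop + 1) % 8);
    }
}
#endif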
5788
5789
5790/**
5791 * Updates the FSW, FOP, FPUIP, and FPUCS.
5792 *
5793 * @param pIemCpu The IEM per CPU data.
5794 * @param u16FSW The FSW from the current instruction.
5795 */
5796IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5797{
5798 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5799 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5800 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5801 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5802}
5803
5804
5805/**
5806 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5807 *
5808 * @param pIemCpu The IEM per CPU data.
5809 * @param u16FSW The FSW from the current instruction.
5810 */
5811IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5812{
5813 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5814 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5815 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5816 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5817 iemFpuMaybePopOne(pFpuCtx);
5818}
5819
5820
5821/**
5822 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5823 *
5824 * @param pIemCpu The IEM per CPU data.
5825 * @param u16FSW The FSW from the current instruction.
5826 * @param iEffSeg The effective memory operand selector register.
5827 * @param GCPtrEff The effective memory operand offset.
5828 */
5829IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5830{
5831 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5832 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5833 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5834 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5835 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5836}
5837
5838
5839/**
5840 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5841 *
5842 * @param pIemCpu The IEM per CPU data.
5843 * @param u16FSW The FSW from the current instruction.
5844 */
5845IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5846{
5847 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5848 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5849 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5850 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5851 iemFpuMaybePopOne(pFpuCtx);
5852 iemFpuMaybePopOne(pFpuCtx);
5853}
5854
5855
5856/**
5857 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5858 *
5859 * @param pIemCpu The IEM per CPU data.
5860 * @param u16FSW The FSW from the current instruction.
5861 * @param iEffSeg The effective memory operand selector register.
5862 * @param GCPtrEff The effective memory operand offset.
5863 */
5864IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5865{
5866 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5867 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5868 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5869 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5870 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5871 iemFpuMaybePopOne(pFpuCtx);
5872}
5873
5874
5875/**
5876 * Worker routine for raising an FPU stack underflow exception.
5877 *
5878 * @param pIemCpu The IEM per CPU data.
5879 * @param pFpuCtx The FPU context.
5880 * @param iStReg The stack register being accessed.
5881 */
5882IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5883{
5884 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5885 if (pFpuCtx->FCW & X86_FCW_IM)
5886 {
5887 /* Masked underflow. */
5888 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5889 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5890 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5891 if (iStReg != UINT8_MAX)
5892 {
5893 pFpuCtx->FTW |= RT_BIT(iReg);
5894 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5895 }
5896 }
5897 else
5898 {
5899 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5900 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5901 }
5902}
5903
5904
5905/**
5906 * Raises a FPU stack underflow exception.
5907 *
5908 * @param pIemCpu The IEM per CPU data.
5909 * @param iStReg The destination register that should be loaded
5910 * with QNaN if \#IS is not masked. Specify
5911 * UINT8_MAX if none (like for fcom).
5912 */
5913DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5914{
5915 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5916 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5917 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5918 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5919}
5920
5921
5922DECL_NO_INLINE(IEM_STATIC, void)
5923iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5924{
5925 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5926 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5927 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5928 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5929 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5930}
5931
5932
5933DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5934{
5935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5936 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5937 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5938 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5939 iemFpuMaybePopOne(pFpuCtx);
5940}
5941
5942
5943DECL_NO_INLINE(IEM_STATIC, void)
5944iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5945{
5946 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5947 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5948 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5949 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5950 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5951 iemFpuMaybePopOne(pFpuCtx);
5952}
5953
5954
5955DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5956{
5957 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5958 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5959 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5960 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5961 iemFpuMaybePopOne(pFpuCtx);
5962 iemFpuMaybePopOne(pFpuCtx);
5963}
5964
5965
5966DECL_NO_INLINE(IEM_STATIC, void)
5967iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5968{
5969 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5970 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5971 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5972
5973 if (pFpuCtx->FCW & X86_FCW_IM)
5974 {
5975 /* Masked underflow - Push QNaN. */
5976 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5977 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5978 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5979 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5980 pFpuCtx->FTW |= RT_BIT(iNewTop);
5981 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5982 iemFpuRotateStackPush(pFpuCtx);
5983 }
5984 else
5985 {
5986 /* Exception pending - don't change TOP or the register stack. */
5987 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5988 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5989 }
5990}
5991
5992
5993DECL_NO_INLINE(IEM_STATIC, void)
5994iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5995{
5996 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5997 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5998 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5999
6000 if (pFpuCtx->FCW & X86_FCW_IM)
6001 {
6002 /* Masked underflow - Push QNaN. */
6003 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6004 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6005 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6006 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6007 pFpuCtx->FTW |= RT_BIT(iNewTop);
6008 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6009 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6010 iemFpuRotateStackPush(pFpuCtx);
6011 }
6012 else
6013 {
6014 /* Exception pending - don't change TOP or the register stack. */
6015 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6016 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6017 }
6018}
6019
6020
6021/**
6022 * Worker routine for raising an FPU stack overflow exception on a push.
6023 *
6024 * @param pFpuCtx The FPU context.
6025 */
6026IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
6027{
6028 if (pFpuCtx->FCW & X86_FCW_IM)
6029 {
6030 /* Masked overflow. */
6031 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6032 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6033 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
6034 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6035 pFpuCtx->FTW |= RT_BIT(iNewTop);
6036 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6037 iemFpuRotateStackPush(pFpuCtx);
6038 }
6039 else
6040 {
6041 /* Exception pending - don't change TOP or the register stack. */
6042 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6043 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6044 }
6045}
6046
6047
6048/**
6049 * Raises a FPU stack overflow exception on a push.
6050 *
6051 * @param pIemCpu The IEM per CPU data.
6052 */
6053DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
6054{
6055 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6056 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6057 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6058 iemFpuStackPushOverflowOnly(pFpuCtx);
6059}
6060
6061
6062/**
6063 * Raises a FPU stack overflow exception on a push with a memory operand.
6064 *
6065 * @param pIemCpu The IEM per CPU data.
6066 * @param iEffSeg The effective memory operand selector register.
6067 * @param GCPtrEff The effective memory operand offset.
6068 */
6069DECL_NO_INLINE(IEM_STATIC, void)
6070iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6071{
6072 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6073 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6074 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6075 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6076 iemFpuStackPushOverflowOnly(pFpuCtx);
6077}
6078
6079
6080IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
6081{
6082 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6083 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6084 if (pFpuCtx->FTW & RT_BIT(iReg))
6085 return VINF_SUCCESS;
6086 return VERR_NOT_FOUND;
6087}
6088
6089
6090IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
6091{
6092 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6093 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6094 if (pFpuCtx->FTW & RT_BIT(iReg))
6095 {
6096 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
6097 return VINF_SUCCESS;
6098 }
6099 return VERR_NOT_FOUND;
6100}
6101
6102
6103IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
6104 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
6105{
6106 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6107 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6108 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6109 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6110 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6111 {
6112 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6113 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
6114 return VINF_SUCCESS;
6115 }
6116 return VERR_NOT_FOUND;
6117}
6118
6119
6120IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
6121{
6122 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6123 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6124 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6125 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6126 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6127 {
6128 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6129 return VINF_SUCCESS;
6130 }
6131 return VERR_NOT_FOUND;
6132}
6133
6134
6135/**
6136 * Updates the FPU exception status after FCW is changed.
6137 *
6138 * @param pFpuCtx The FPU context.
6139 */
6140IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
6141{
6142 uint16_t u16Fsw = pFpuCtx->FSW;
6143 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
6144 u16Fsw |= X86_FSW_ES | X86_FSW_B;
6145 else
6146 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
6147 pFpuCtx->FSW = u16Fsw;
6148}
6149
6150
6151/**
6152 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6153 *
6154 * @returns The full FTW.
6155 * @param pFpuCtx The FPU context.
6156 */
6157IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6158{
6159 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6160 uint16_t u16Ftw = 0;
6161 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6162 for (unsigned iSt = 0; iSt < 8; iSt++)
6163 {
6164 unsigned const iReg = (iSt + iTop) & 7;
6165 if (!(u8Ftw & RT_BIT(iReg)))
6166 u16Ftw |= 3 << (iReg * 2); /* empty */
6167 else
6168 {
6169 uint16_t uTag;
6170 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6171 if (pr80Reg->s.uExponent == 0x7fff)
6172 uTag = 2; /* Exponent is all 1's => Special. */
6173 else if (pr80Reg->s.uExponent == 0x0000)
6174 {
6175 if (pr80Reg->s.u64Mantissa == 0x0000)
6176 uTag = 1; /* All bits are zero => Zero. */
6177 else
6178 uTag = 2; /* Must be special. */
6179 }
6180 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6181 uTag = 0; /* Valid. */
6182 else
6183 uTag = 2; /* Must be special. */
6184
6185 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
6186 }
6187 }
6188
6189 return u16Ftw;
6190}
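
/*
 * Worked example (hypothetical state): the tag pairs are 00=valid, 01=zero,
 * 10=special and 11=empty. With TOP=6, ST(0) (physical reg 6) holding 1.0 and
 * ST(1) (physical reg 7) holding +0.0 while everything else is empty, the
 * abbreviated FTW is 0xc0 and the full FTW computed above is 0x4fff
 * (pairs 0..5 = 11, pair 6 = 00, pair 7 = 01).
 */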
6191
6192
6193/**
6194 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6195 *
6196 * @returns The compressed FTW.
6197 * @param u16FullFtw The full FTW to convert.
6198 */
6199IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6200{
6201 uint8_t u8Ftw = 0;
6202 for (unsigned i = 0; i < 8; i++)
6203 {
6204 if ((u16FullFtw & 3) != 3 /*empty*/)
6205 u8Ftw |= RT_BIT(i);
6206 u16FullFtw >>= 2;
6207 }
6208
6209 return u8Ftw;
6210}
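
/*
 * Continuing the example above: compressing 0x4fff this way sets a bit for
 * every pair that is not 11 (empty), giving back the abbreviated 0xc0.
 */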
6211
6212/** @} */
6213
6214
6215/** @name Memory access.
6216 *
6217 * @{
6218 */
6219
6220
6221/**
6222 * Updates the IEMCPU::cbWritten counter if applicable.
6223 *
6224 * @param pIemCpu The IEM per CPU data.
6225 * @param fAccess The access being accounted for.
6226 * @param cbMem The access size.
6227 */
6228DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6229{
6230 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6231 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6232 pIemCpu->cbWritten += (uint32_t)cbMem;
6233}
6234
6235
6236/**
6237 * Checks if the given segment can be written to, raising the appropriate
6238 * exception if not.
6239 *
6240 * @returns VBox strict status code.
6241 *
6242 * @param pIemCpu The IEM per CPU data.
6243 * @param pHid Pointer to the hidden register.
6244 * @param iSegReg The register number.
6245 * @param pu64BaseAddr Where to return the base address to use for the
6246 * segment. (In 64-bit code it may differ from the
6247 * base in the hidden segment.)
6248 */
6249IEM_STATIC VBOXSTRICTRC
6250iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6251{
6252 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6253 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6254 else
6255 {
6256 if (!pHid->Attr.n.u1Present)
6257 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6258
6259 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6260 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6261 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6262 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6263 *pu64BaseAddr = pHid->u64Base;
6264 }
6265 return VINF_SUCCESS;
6266}
6267
6268
6269/**
6270 * Checks if the given segment can be read from, raising the appropriate
6271 * exception if not.
6272 *
6273 * @returns VBox strict status code.
6274 *
6275 * @param pIemCpu The IEM per CPU data.
6276 * @param pHid Pointer to the hidden register.
6277 * @param iSegReg The register number.
6278 * @param pu64BaseAddr Where to return the base address to use for the
6279 * segment. (In 64-bit code it may differ from the
6280 * base in the hidden segment.)
6281 */
6282IEM_STATIC VBOXSTRICTRC
6283iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6284{
6285 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6286 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6287 else
6288 {
6289 if (!pHid->Attr.n.u1Present)
6290 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6291
6292 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6293 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6294 *pu64BaseAddr = pHid->u64Base;
6295 }
6296 return VINF_SUCCESS;
6297}
6298
6299
6300/**
6301 * Applies the segment limit, base and attributes.
6302 *
6303 * This may raise a \#GP or \#SS.
6304 *
6305 * @returns VBox strict status code.
6306 *
6307 * @param pIemCpu The IEM per CPU data.
6308 * @param fAccess The kind of access which is being performed.
6309 * @param iSegReg The index of the segment register to apply.
6310 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6311 * TSS, ++).
6312 * @param cbMem The access size.
6313 * @param pGCPtrMem Pointer to the guest memory address to apply
6314 * segmentation to. Input and output parameter.
6315 */
6316IEM_STATIC VBOXSTRICTRC
6317iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6318{
6319 if (iSegReg == UINT8_MAX)
6320 return VINF_SUCCESS;
6321
6322 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6323 switch (pIemCpu->enmCpuMode)
6324 {
6325 case IEMMODE_16BIT:
6326 case IEMMODE_32BIT:
6327 {
6328 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6329 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6330
6331 if ( pSel->Attr.n.u1Present
6332 && !pSel->Attr.n.u1Unusable)
6333 {
6334 Assert(pSel->Attr.n.u1DescType);
6335 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6336 {
6337 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6338 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6339 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6340
6341 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6342 {
6343 /** @todo CPL check. */
6344 }
6345
6346 /*
6347 * There are two kinds of data selectors, normal and expand down.
6348 */
6349 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6350 {
6351 if ( GCPtrFirst32 > pSel->u32Limit
6352 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6353 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6354 }
6355 else
6356 {
6357 /*
6358 * The upper boundary is defined by the B bit, not the G bit!
6359 */
6360 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6361 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6362 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6363 }
6364 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6365 }
6366 else
6367 {
6368
6369 /*
6370 * Code selectors can usually be used to read through; writing is
6371 * only permitted in real and V8086 mode.
6372 */
6373 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6374 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6375 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6376 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6377 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6378
6379 if ( GCPtrFirst32 > pSel->u32Limit
6380 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6381 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6382
6383 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6384 {
6385 /** @todo CPL check. */
6386 }
6387
6388 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6389 }
6390 }
6391 else
6392 return iemRaiseGeneralProtectionFault0(pIemCpu);
6393 return VINF_SUCCESS;
6394 }
6395
6396 case IEMMODE_64BIT:
6397 {
6398 RTGCPTR GCPtrMem = *pGCPtrMem;
6399 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6400 *pGCPtrMem = GCPtrMem + pSel->u64Base;
6401
6402 Assert(cbMem >= 1);
6403 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
6404 return VINF_SUCCESS;
6405 return iemRaiseGeneralProtectionFault0(pIemCpu);
6406 }
6407
6408 default:
6409 AssertFailedReturn(VERR_IEM_IPE_7);
6410 }
6411}
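
/*
 * Worked example for the expand-down branch above (hypothetical selector):
 * an expand-down data segment with u32Limit=0x0fff and B=1 accepts offsets
 * 0x1000..0xffffffff, so a 4 byte access at offset 0x0ffe faults while one
 * at 0x1000 is fine; with B=0 the upper bound drops to 0xffff. For normal
 * segments the test is the usual first/last byte within the limit, and in
 * 64-bit mode only FS/GS contribute a base while everything else reduces to
 * the canonical-address check.
 */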
6412
6413
6414/**
6415 * Translates a virtual address to a physical address and checks if we
6416 * can access the page as specified.
6417 * @returns VBox strict status code.
6418 * @param pIemCpu The IEM per CPU data.
6419 * @param GCPtrMem The virtual address.
6420 * @param fAccess The intended access.
6421 * @param pGCPhysMem Where to return the physical address.
6422 */
6423IEM_STATIC VBOXSTRICTRC
6424iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6425{
6426 /** @todo Need a different PGM interface here. We're currently using
6427 * generic / REM interfaces. This won't cut it for R0 & RC. */
6428 RTGCPHYS GCPhys;
6429 uint64_t fFlags;
6430 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6431 if (RT_FAILURE(rc))
6432 {
6433 /** @todo Check unassigned memory in unpaged mode. */
6434 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6435 *pGCPhysMem = NIL_RTGCPHYS;
6436 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6437 }
6438
6439 /* If the page is writable and does not have the no-exec bit set, all
6440 access is allowed. Otherwise we'll have to check more carefully... */
6441 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6442 {
6443 /* Write to read only memory? */
6444 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6445 && !(fFlags & X86_PTE_RW)
6446 && ( pIemCpu->uCpl != 0
6447 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6448 {
6449 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6450 *pGCPhysMem = NIL_RTGCPHYS;
6451 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6452 }
6453
6454 /* Kernel memory accessed by userland? */
6455 if ( !(fFlags & X86_PTE_US)
6456 && pIemCpu->uCpl == 3
6457 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6458 {
6459 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6460 *pGCPhysMem = NIL_RTGCPHYS;
6461 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6462 }
6463
6464 /* Executing non-executable memory? */
6465 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6466 && (fFlags & X86_PTE_PAE_NX)
6467 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6468 {
6469 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6470 *pGCPhysMem = NIL_RTGCPHYS;
6471 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6472 VERR_ACCESS_DENIED);
6473 }
6474 }
6475
6476 /*
6477 * Set the dirty / access flags.
6478     * ASSUMES this is set when the address is translated rather than on commit...
6479 */
6480 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6481 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6482 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6483 {
6484 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6485 AssertRC(rc2);
6486 }
6487
6488 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6489 *pGCPhysMem = GCPhys;
6490 return VINF_SUCCESS;
6491}
6492
6493
6494
6495/**
6496 * Maps a physical page.
6497 *
6498 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6499 * @param pIemCpu The IEM per CPU data.
6500 * @param GCPhysMem The physical address.
6501 * @param fAccess The intended access.
6502 * @param ppvMem Where to return the mapping address.
6503 * @param pLock The PGM lock.
6504 */
6505IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6506{
6507#ifdef IEM_VERIFICATION_MODE_FULL
6508 /* Force the alternative path so we can ignore writes. */
6509 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6510 {
6511 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6512 {
6513 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6514 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6515 if (RT_FAILURE(rc2))
6516 pIemCpu->fProblematicMemory = true;
6517 }
6518 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6519 }
6520#endif
6521#ifdef IEM_LOG_MEMORY_WRITES
6522 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6523 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6524#endif
6525#ifdef IEM_VERIFICATION_MODE_MINIMAL
6526 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6527#endif
6528
6529 /** @todo This API may require some improving later. A private deal with PGM
6530     *        regarding locking and unlocking needs to be struck. A couple of TLBs
6531 * living in PGM, but with publicly accessible inlined access methods
6532 * could perhaps be an even better solution. */
6533 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6534 GCPhysMem,
6535 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6536 pIemCpu->fBypassHandlers,
6537 ppvMem,
6538 pLock);
6539 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6540 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6541
6542#ifdef IEM_VERIFICATION_MODE_FULL
6543 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6544 pIemCpu->fProblematicMemory = true;
6545#endif
6546 return rc;
6547}
6548
6549
6550/**
6551 * Unmap a page previously mapped by iemMemPageMap.
6552 *
6553 * @param pIemCpu The IEM per CPU data.
6554 * @param GCPhysMem The physical address.
6555 * @param fAccess The intended access.
6556 * @param pvMem What iemMemPageMap returned.
6557 * @param pLock The PGM lock.
6558 */
6559DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6560{
6561 NOREF(pIemCpu);
6562 NOREF(GCPhysMem);
6563 NOREF(fAccess);
6564 NOREF(pvMem);
6565 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6566}
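
/*
 * Illustrative sketch (not compiled in): the typical pairing of iemMemPageMap
 * and iemMemPageUnmap once the physical address has been obtained from
 * iemMemPageTranslateAndCheckAccess.  The surrounding variables are
 * placeholders.
 *
 *      void            *pvMem;
 *      PGMPAGEMAPLOCK   Lock;
 *      int rc = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, &pvMem, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... read the guest data via pvMem ...
 *          iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, pvMem, &Lock);
 *      }
 */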
6567
6568
6569/**
6570 * Looks up a memory mapping entry.
6571 *
6572 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
6573 * @param pIemCpu The IEM per CPU data.
6574 * @param pvMem The memory address.
6575 * @param   pvMem           The memory address.
6576 * @param   fAccess         The access type and kind flags to match
6577 *                          (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
6576 */
6577DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6578{
6579 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
6580 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6581 if ( pIemCpu->aMemMappings[0].pv == pvMem
6582 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6583 return 0;
6584 if ( pIemCpu->aMemMappings[1].pv == pvMem
6585 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6586 return 1;
6587 if ( pIemCpu->aMemMappings[2].pv == pvMem
6588 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6589 return 2;
6590 return VERR_NOT_FOUND;
6591}
6592
6593
6594/**
6595 * Finds a free memmap entry when using iNextMapping doesn't work.
6596 *
6597 * @returns Memory mapping index, 1024 on failure.
6598 * @param pIemCpu The IEM per CPU data.
6599 */
6600IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6601{
6602 /*
6603 * The easy case.
6604 */
6605 if (pIemCpu->cActiveMappings == 0)
6606 {
6607 pIemCpu->iNextMapping = 1;
6608 return 0;
6609 }
6610
6611 /* There should be enough mappings for all instructions. */
6612 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6613
6614 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6615 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6616 return i;
6617
6618 AssertFailedReturn(1024);
6619}
6620
6621
6622/**
6623 * Commits a bounce buffer that needs writing back and unmaps it.
6624 *
6625 * @returns Strict VBox status code.
6626 * @param pIemCpu The IEM per CPU data.
6627 * @param iMemMap The index of the buffer to commit.
6628 * @param   fPostponeFail   Whether we can postpone write failures to ring-3.
6629 * Always false in ring-3, obviously.
6630 */
6631IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap, bool fPostponeFail)
6632{
6633 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6634 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6635#ifdef IN_RING3
6636 Assert(!fPostponeFail);
6637#endif
6638
6639 /*
6640 * Do the writing.
6641 */
6642#ifndef IEM_VERIFICATION_MODE_MINIMAL
6643 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6644 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6645 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6646 {
6647 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6648 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6649 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6650 if (!pIemCpu->fBypassHandlers)
6651 {
6652 /*
6653 * Carefully and efficiently dealing with access handler return
6654             * codes makes this a little bloated.
6655 */
6656 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6657 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6658 pbBuf,
6659 cbFirst,
6660 PGMACCESSORIGIN_IEM);
6661 if (rcStrict == VINF_SUCCESS)
6662 {
6663 if (cbSecond)
6664 {
6665 rcStrict = PGMPhysWrite(pVM,
6666 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6667 pbBuf + cbFirst,
6668 cbSecond,
6669 PGMACCESSORIGIN_IEM);
6670 if (rcStrict == VINF_SUCCESS)
6671 { /* nothing */ }
6672 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6673 {
6674 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6675 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6676 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6677 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6678 }
6679# ifndef IN_RING3
6680 else if (fPostponeFail)
6681 {
6682 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6683 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6684 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6685 pIemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6686 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
6687 return iemSetPassUpStatus(pIemCpu, rcStrict);
6688 }
6689# endif
6690 else
6691 {
6692 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6693 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6694 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6695 return rcStrict;
6696 }
6697 }
6698 }
6699 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6700 {
6701 if (!cbSecond)
6702 {
6703 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6704 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6705 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6706 }
6707 else
6708 {
6709 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6710 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6711 pbBuf + cbFirst,
6712 cbSecond,
6713 PGMACCESSORIGIN_IEM);
6714 if (rcStrict2 == VINF_SUCCESS)
6715 {
6716 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6717 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6718 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6719 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6720 }
6721 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6722 {
6723 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6724 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6725 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6726 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6727 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6728 }
6729# ifndef IN_RING3
6730 else if (fPostponeFail)
6731 {
6732 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6733 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6734 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6735 pIemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6736 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
6737 return iemSetPassUpStatus(pIemCpu, rcStrict);
6738 }
6739# endif
6740 else
6741 {
6742 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6743 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6744 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6745 return rcStrict2;
6746 }
6747 }
6748 }
6749# ifndef IN_RING3
6750 else if (fPostponeFail)
6751 {
6752 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6753 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6754 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6755 if (!cbSecond)
6756 pIemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6757 else
6758 pIemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6759 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
6760 return iemSetPassUpStatus(pIemCpu, rcStrict);
6761 }
6762# endif
6763 else
6764 {
6765 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6766 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6767 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6768 return rcStrict;
6769 }
6770 }
6771 else
6772 {
6773 /*
6774 * No access handlers, much simpler.
6775 */
6776 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6777 if (RT_SUCCESS(rc))
6778 {
6779 if (cbSecond)
6780 {
6781 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6782 if (RT_SUCCESS(rc))
6783 { /* likely */ }
6784 else
6785 {
6786 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6787 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6788 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6789 return rc;
6790 }
6791 }
6792 }
6793 else
6794 {
6795 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6796 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6797 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6798 return rc;
6799 }
6800 }
6801 }
6802#endif
6803
6804#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6805 /*
6806 * Record the write(s).
6807 */
6808 if (!pIemCpu->fNoRem)
6809 {
6810 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6811 if (pEvtRec)
6812 {
6813 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6814 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6815 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6816 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6817 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6818 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6819 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6820 }
6821 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6822 {
6823 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6824 if (pEvtRec)
6825 {
6826 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6827 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6828 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6829 memcpy(pEvtRec->u.RamWrite.ab,
6830 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6831 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6832 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6833 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6834 }
6835 }
6836 }
6837#endif
6838#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6839 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6840 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6841 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6842 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6843 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6844 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6845
6846 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6847 g_cbIemWrote = cbWrote;
6848 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6849#endif
6850
6851 /*
6852 * Free the mapping entry.
6853 */
6854 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6855 Assert(pIemCpu->cActiveMappings != 0);
6856 pIemCpu->cActiveMappings--;
6857 return VINF_SUCCESS;
6858}
6859
6860
6861/**
6862 * iemMemMap worker that deals with a request crossing pages.
6863 */
6864IEM_STATIC VBOXSTRICTRC
6865iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6866{
6867 /*
6868 * Do the address translations.
6869 */
6870 RTGCPHYS GCPhysFirst;
6871 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6872 if (rcStrict != VINF_SUCCESS)
6873 return rcStrict;
6874
6875 RTGCPHYS GCPhysSecond;
6876 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
6877 fAccess, &GCPhysSecond);
6878 if (rcStrict != VINF_SUCCESS)
6879 return rcStrict;
6880 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6881
6882 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6883#ifdef IEM_VERIFICATION_MODE_FULL
6884 /*
6885 * Detect problematic memory when verifying so we can select
6886 * the right execution engine. (TLB: Redo this.)
6887 */
6888 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6889 {
6890 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6891 if (RT_SUCCESS(rc2))
6892 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6893 if (RT_FAILURE(rc2))
6894 pIemCpu->fProblematicMemory = true;
6895 }
6896#endif
6897
6898
6899 /*
6900 * Read in the current memory content if it's a read, execute or partial
6901 * write access.
6902 */
6903 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6904 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6905 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6906
6907 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6908 {
6909 if (!pIemCpu->fBypassHandlers)
6910 {
6911 /*
6912 * Must carefully deal with access handler status codes here,
6913             * which makes the code a bit bloated.
6914 */
6915 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6916 if (rcStrict == VINF_SUCCESS)
6917 {
6918 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6919 if (rcStrict == VINF_SUCCESS)
6920 { /*likely */ }
6921 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6922 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6923 else
6924 {
6925 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6926 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6927 return rcStrict;
6928 }
6929 }
6930 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6931 {
6932 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6933 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6934 {
6935 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6936 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6937 }
6938 else
6939 {
6940 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6941                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6942 return rcStrict2;
6943 }
6944 }
6945 else
6946 {
6947 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6948 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6949 return rcStrict;
6950 }
6951 }
6952 else
6953 {
6954 /*
6955             * No informational status codes here, much more straightforward.
6956 */
6957 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6958 if (RT_SUCCESS(rc))
6959 {
6960 Assert(rc == VINF_SUCCESS);
6961 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6962 if (RT_SUCCESS(rc))
6963 Assert(rc == VINF_SUCCESS);
6964 else
6965 {
6966 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6967 return rc;
6968 }
6969 }
6970 else
6971 {
6972 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6973 return rc;
6974 }
6975 }
6976
6977#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6978 if ( !pIemCpu->fNoRem
6979 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6980 {
6981 /*
6982 * Record the reads.
6983 */
6984 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6985 if (pEvtRec)
6986 {
6987 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6988 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6989 pEvtRec->u.RamRead.cb = cbFirstPage;
6990 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6991 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6992 }
6993 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6994 if (pEvtRec)
6995 {
6996 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6997 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6998 pEvtRec->u.RamRead.cb = cbSecondPage;
6999 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7000 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7001 }
7002 }
7003#endif
7004 }
7005#ifdef VBOX_STRICT
7006 else
7007 memset(pbBuf, 0xcc, cbMem);
7008 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
7009 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
7010#endif
7011
7012 /*
7013 * Commit the bounce buffer entry.
7014 */
7015 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
7016 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
7017 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
7018 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
7019 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
7020 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
7021 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
7022 pIemCpu->iNextMapping = iMemMap + 1;
7023 pIemCpu->cActiveMappings++;
7024
7025 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7026 *ppvMem = pbBuf;
7027 return VINF_SUCCESS;
7028}
7029
7030
7031/**
7032 * iemMemMap worker that deals with iemMemPageMap failures.
7033 */
7034IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
7035 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
7036{
7037 /*
7038 * Filter out conditions we can handle and the ones which shouldn't happen.
7039 */
7040 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
7041 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
7042 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
7043 {
7044 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
7045 return rcMap;
7046 }
7047 pIemCpu->cPotentialExits++;
7048
7049 /*
7050 * Read in the current memory content if it's a read, execute or partial
7051 * write access.
7052 */
7053 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
7054 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
7055 {
7056 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
7057 memset(pbBuf, 0xff, cbMem);
7058 else
7059 {
7060 int rc;
7061 if (!pIemCpu->fBypassHandlers)
7062 {
7063 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
7064 if (rcStrict == VINF_SUCCESS)
7065 { /* nothing */ }
7066 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7067 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
7068 else
7069 {
7070 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
7071 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7072 return rcStrict;
7073 }
7074 }
7075 else
7076 {
7077 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
7078 if (RT_SUCCESS(rc))
7079 { /* likely */ }
7080 else
7081 {
7082 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
7083 GCPhysFirst, rc));
7084 return rc;
7085 }
7086 }
7087 }
7088
7089#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7090 if ( !pIemCpu->fNoRem
7091 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
7092 {
7093 /*
7094 * Record the read.
7095 */
7096 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7097 if (pEvtRec)
7098 {
7099 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7100 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
7101 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
7102 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7103 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7104 }
7105 }
7106#endif
7107 }
7108#ifdef VBOX_STRICT
7109 else
7110 memset(pbBuf, 0xcc, cbMem);
7111#endif
7112#ifdef VBOX_STRICT
7113 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
7114 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
7115#endif
7116
7117 /*
7118 * Commit the bounce buffer entry.
7119 */
7120 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
7121 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
7122 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
7123 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
7124 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
7125 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
7126 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
7127 pIemCpu->iNextMapping = iMemMap + 1;
7128 pIemCpu->cActiveMappings++;
7129
7130 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7131 *ppvMem = pbBuf;
7132 return VINF_SUCCESS;
7133}
7134
7135
7136
7137/**
7138 * Maps the specified guest memory for the given kind of access.
7139 *
7140 * This may be using bounce buffering of the memory if it's crossing a page
7141 * boundary or if there is an access handler installed for any of it. Because
7142 * of lock prefix guarantees, we're in for some extra clutter when this
7143 * happens.
7144 *
7145 * This may raise a \#GP, \#SS, \#PF or \#AC.
7146 *
7147 * @returns VBox strict status code.
7148 *
7149 * @param pIemCpu The IEM per CPU data.
7150 * @param ppvMem Where to return the pointer to the mapped
7151 * memory.
7152 * @param cbMem The number of bytes to map. This is usually 1,
7153 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7154 * string operations it can be up to a page.
7155 * @param iSegReg The index of the segment register to use for
7156 * this access. The base and limits are checked.
7157 * Use UINT8_MAX to indicate that no segmentation
7158 * is required (for IDT, GDT and LDT accesses).
7159 * @param GCPtrMem The address of the guest memory.
7160 * @param fAccess How the memory is being accessed. The
7161 * IEM_ACCESS_TYPE_XXX bit is used to figure out
7162 * how to map the memory, while the
7163 * IEM_ACCESS_WHAT_XXX bit is used when raising
7164 * exceptions.
7165 */
7166IEM_STATIC VBOXSTRICTRC
7167iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
7168{
7169 /*
7170 * Check the input and figure out which mapping entry to use.
7171 */
7172 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7173 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
7174 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
7175
7176 unsigned iMemMap = pIemCpu->iNextMapping;
7177 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
7178 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7179 {
7180 iMemMap = iemMemMapFindFree(pIemCpu);
7181 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
7182 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
7183 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
7184 pIemCpu->aMemMappings[2].fAccess),
7185 VERR_IEM_IPE_9);
7186 }
7187
7188 /*
7189 * Map the memory, checking that we can actually access it. If something
7190 * slightly complicated happens, fall back on bounce buffering.
7191 */
7192 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7193 if (rcStrict != VINF_SUCCESS)
7194 return rcStrict;
7195
7196 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
7197 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
7198
7199 RTGCPHYS GCPhysFirst;
7200 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
7201 if (rcStrict != VINF_SUCCESS)
7202 return rcStrict;
7203
7204 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7205 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7206 if (fAccess & IEM_ACCESS_TYPE_READ)
7207 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7208
7209 void *pvMem;
7210 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7211 if (rcStrict != VINF_SUCCESS)
7212 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7213
7214 /*
7215 * Fill in the mapping table entry.
7216 */
7217 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7218 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7219 pIemCpu->iNextMapping = iMemMap + 1;
7220 pIemCpu->cActiveMappings++;
7221
7222 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7223 *ppvMem = pvMem;
7224 return VINF_SUCCESS;
7225}
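
/*
 * Illustrative sketch (not compiled in): the read-modify-write pattern the
 * instruction implementations build on top of iemMemMap and
 * iemMemCommitAndUnmap.  GCPtrEff is a placeholder for the decoded effective
 * address and IEM_ACCESS_DATA_RW is assumed to be the combined data
 * read+write access value.
 *
 *      uint32_t    *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        X86_SREG_DS, GCPtrEff, IEM_ACCESS_DATA_RW);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst |= RT_BIT_32(0);
 *          rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_RW);
 *      }
 *      return rcStrict;
 */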
7226
7227
7228/**
7229 * Commits the guest memory if bounce buffered and unmaps it.
7230 *
7231 * @returns Strict VBox status code.
7232 * @param pIemCpu The IEM per CPU data.
7233 * @param pvMem The mapping.
7234 * @param fAccess The kind of access.
7235 */
7236IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7237{
7238 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7239 AssertReturn(iMemMap >= 0, iMemMap);
7240
7241 /* If it's bounce buffered, we may need to write back the buffer. */
7242 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7243 {
7244 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7245 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap, false /*fPostponeFail*/);
7246 }
7247 /* Otherwise unlock it. */
7248 else
7249 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7250
7251 /* Free the entry. */
7252 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7253 Assert(pIemCpu->cActiveMappings != 0);
7254 pIemCpu->cActiveMappings--;
7255 return VINF_SUCCESS;
7256}
7257
7258
7259#ifndef IN_RING3
7260/**
7261 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
7262 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
7263 *
7264 * Allows the instruction to be completed and retired, while the IEM user will
7265 * return to ring-3 immediately afterwards and do the postponed writes there.
7266 *
7267 * @returns VBox status code (no strict statuses). Caller must check
7268 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7269 * @param pIemCpu The IEM per CPU data.
7270 * @param pvMem The mapping.
7271 * @param fAccess The kind of access.
7272 */
7273IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7274{
7275 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7276 AssertReturn(iMemMap >= 0, iMemMap);
7277
7278 /* If it's bounce buffered, we may need to write back the buffer. */
7279 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7280 {
7281 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7282 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap, true /*fPostponeFail*/);
7283 }
7284 /* Otherwise unlock it. */
7285 else
7286 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7287
7288 /* Free the entry. */
7289 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7290 Assert(pIemCpu->cActiveMappings != 0);
7291 pIemCpu->cActiveMappings--;
7292 return VINF_SUCCESS;
7293}
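
/*
 * Illustrative sketch (not compiled in): how a string instruction worker
 * might use the postponing variant.  VMCPU_FF_IS_SET is assumed to be the
 * force-flag test macro and pu16Dst a previously mapped destination.
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *      if (   rcStrict != VINF_SUCCESS
 *          || VMCPU_FF_IS_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM))
 *          return rcStrict; // stop iterating; a postponed write is pending for ring-3
 */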
7294#endif
7295
7296
7297/**
7298 * Rollbacks mappings, releasing page locks and such.
7299 *
7300 * The caller shall only call this after checking cActiveMappings.
7301 *
7303 * @param pIemCpu The IEM per CPU data.
7304 */
7305IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7306{
7307 Assert(pIemCpu->cActiveMappings > 0);
7308
7309 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7310 while (iMemMap-- > 0)
7311 {
7312 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7313 if (fAccess != IEM_ACCESS_INVALID)
7314 {
7315 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7316 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7317 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7318 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7319 Assert(pIemCpu->cActiveMappings > 0);
7320 pIemCpu->cActiveMappings--;
7321 }
7322 }
7323}
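
/*
 * Illustrative sketch (not compiled in): the guard callers are expected to
 * use, since iemMemRollback asserts that there is something to roll back.
 *
 *      if (pIemCpu->cActiveMappings > 0)
 *          iemMemRollback(pIemCpu);
 */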
7324
7325
7326/**
7327 * Fetches a data byte.
7328 *
7329 * @returns Strict VBox status code.
7330 * @param pIemCpu The IEM per CPU data.
7331 * @param pu8Dst Where to return the byte.
7332 * @param iSegReg The index of the segment register to use for
7333 * this access. The base and limits are checked.
7334 * @param GCPtrMem The address of the guest memory.
7335 */
7336IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7337{
7338 /* The lazy approach for now... */
7339 uint8_t const *pu8Src;
7340 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7341 if (rc == VINF_SUCCESS)
7342 {
7343 *pu8Dst = *pu8Src;
7344 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7345 }
7346 return rc;
7347}
7348
7349
7350/**
7351 * Fetches a data word.
7352 *
7353 * @returns Strict VBox status code.
7354 * @param pIemCpu The IEM per CPU data.
7355 * @param pu16Dst Where to return the word.
7356 * @param iSegReg The index of the segment register to use for
7357 * this access. The base and limits are checked.
7358 * @param GCPtrMem The address of the guest memory.
7359 */
7360IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7361{
7362 /* The lazy approach for now... */
7363 uint16_t const *pu16Src;
7364 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7365 if (rc == VINF_SUCCESS)
7366 {
7367 *pu16Dst = *pu16Src;
7368 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7369 }
7370 return rc;
7371}
7372
7373
7374/**
7375 * Fetches a data dword.
7376 *
7377 * @returns Strict VBox status code.
7378 * @param pIemCpu The IEM per CPU data.
7379 * @param pu32Dst Where to return the dword.
7380 * @param iSegReg The index of the segment register to use for
7381 * this access. The base and limits are checked.
7382 * @param GCPtrMem The address of the guest memory.
7383 */
7384IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7385{
7386 /* The lazy approach for now... */
7387 uint32_t const *pu32Src;
7388 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7389 if (rc == VINF_SUCCESS)
7390 {
7391 *pu32Dst = *pu32Src;
7392 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7393 }
7394 return rc;
7395}
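
/*
 * Illustrative sketch (not compiled in): how an instruction implementation
 * typically pulls in a 32-bit memory operand with the helper above.
 * GCPtrEffSrc is a placeholder for the decoded effective address.
 *
 *      uint32_t u32Src;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, &u32Src, X86_SREG_DS, GCPtrEffSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */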
7396
7397
7398#ifdef SOME_UNUSED_FUNCTION
7399/**
7400 * Fetches a data dword and sign extends it to a qword.
7401 *
7402 * @returns Strict VBox status code.
7403 * @param pIemCpu The IEM per CPU data.
7404 * @param pu64Dst Where to return the sign extended value.
7405 * @param iSegReg The index of the segment register to use for
7406 * this access. The base and limits are checked.
7407 * @param GCPtrMem The address of the guest memory.
7408 */
7409IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7410{
7411 /* The lazy approach for now... */
7412 int32_t const *pi32Src;
7413 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7414 if (rc == VINF_SUCCESS)
7415 {
7416 *pu64Dst = *pi32Src;
7417 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7418 }
7419#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7420 else
7421 *pu64Dst = 0;
7422#endif
7423 return rc;
7424}
7425#endif
7426
7427
7428/**
7429 * Fetches a data qword.
7430 *
7431 * @returns Strict VBox status code.
7432 * @param pIemCpu The IEM per CPU data.
7433 * @param pu64Dst Where to return the qword.
7434 * @param iSegReg The index of the segment register to use for
7435 * this access. The base and limits are checked.
7436 * @param GCPtrMem The address of the guest memory.
7437 */
7438IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7439{
7440 /* The lazy approach for now... */
7441 uint64_t const *pu64Src;
7442 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7443 if (rc == VINF_SUCCESS)
7444 {
7445 *pu64Dst = *pu64Src;
7446 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7447 }
7448 return rc;
7449}
7450
7451
7452/**
7453 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7454 *
7455 * @returns Strict VBox status code.
7456 * @param pIemCpu The IEM per CPU data.
7457 * @param pu64Dst Where to return the qword.
7458 * @param iSegReg The index of the segment register to use for
7459 * this access. The base and limits are checked.
7460 * @param GCPtrMem The address of the guest memory.
7461 */
7462IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7463{
7464 /* The lazy approach for now... */
7465 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7466 if (RT_UNLIKELY(GCPtrMem & 15))
7467 return iemRaiseGeneralProtectionFault0(pIemCpu);
7468
7469 uint64_t const *pu64Src;
7470 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7471 if (rc == VINF_SUCCESS)
7472 {
7473 *pu64Dst = *pu64Src;
7474 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7475 }
7476 return rc;
7477}
7478
7479
7480/**
7481 * Fetches a data tword.
7482 *
7483 * @returns Strict VBox status code.
7484 * @param pIemCpu The IEM per CPU data.
7485 * @param pr80Dst Where to return the tword.
7486 * @param iSegReg The index of the segment register to use for
7487 * this access. The base and limits are checked.
7488 * @param GCPtrMem The address of the guest memory.
7489 */
7490IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7491{
7492 /* The lazy approach for now... */
7493 PCRTFLOAT80U pr80Src;
7494 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7495 if (rc == VINF_SUCCESS)
7496 {
7497 *pr80Dst = *pr80Src;
7498 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7499 }
7500 return rc;
7501}
7502
7503
7504/**
7505 * Fetches a data dqword (double qword), generally SSE related.
7506 *
7507 * @returns Strict VBox status code.
7508 * @param pIemCpu The IEM per CPU data.
7509 * @param   pu128Dst            Where to return the dqword.
7510 * @param iSegReg The index of the segment register to use for
7511 * this access. The base and limits are checked.
7512 * @param GCPtrMem The address of the guest memory.
7513 */
7514IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7515{
7516 /* The lazy approach for now... */
7517 uint128_t const *pu128Src;
7518 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7519 if (rc == VINF_SUCCESS)
7520 {
7521 *pu128Dst = *pu128Src;
7522 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7523 }
7524 return rc;
7525}
7526
7527
7528/**
7529 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7530 * related.
7531 *
7532 * Raises \#GP(0) if not aligned.
7533 *
7534 * @returns Strict VBox status code.
7535 * @param pIemCpu The IEM per CPU data.
7536 * @param   pu128Dst            Where to return the dqword.
7537 * @param iSegReg The index of the segment register to use for
7538 * this access. The base and limits are checked.
7539 * @param GCPtrMem The address of the guest memory.
7540 */
7541IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7542{
7543 /* The lazy approach for now... */
7544 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7545 if ( (GCPtrMem & 15)
7546 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7547 return iemRaiseGeneralProtectionFault0(pIemCpu);
7548
7549 uint128_t const *pu128Src;
7550 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7551 if (rc == VINF_SUCCESS)
7552 {
7553 *pu128Dst = *pu128Src;
7554 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7555 }
7556 return rc;
7557}
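
/*
 * Worked example (illustrative): with GCPtrMem=0x1008 the low four bits are
 * non-zero, so the fetch above raises #GP(0) unless MXCSR.MM masks the
 * misalignment; GCPtrMem=0x1010 passes the alignment test.
 */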
7558
7559
7560
7561
7562/**
7563 * Fetches a descriptor register (lgdt, lidt).
7564 *
7565 * @returns Strict VBox status code.
7566 * @param pIemCpu The IEM per CPU data.
7567 * @param pcbLimit Where to return the limit.
7568 * @param pGCPtrBase Where to return the base.
7569 * @param iSegReg The index of the segment register to use for
7570 * this access. The base and limits are checked.
7571 * @param GCPtrMem The address of the guest memory.
7572 * @param enmOpSize The effective operand size.
7573 */
7574IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7575 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7576{
7577 /*
7578 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7579 * little special:
7580 * - The two reads are done separately.
7581     *    - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
7582 * - We suspect the 386 to actually commit the limit before the base in
7583 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7584     *      don't try to emulate this eccentric behavior, because it's not well
7585 * enough understood and rather hard to trigger.
7586 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7587 */
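    /* Worked example (illustrative): outside 64-bit mode with a 16-bit
       operand size, a 6-byte operand of ff 03 78 56 34 12 (little endian)
       yields limit 0x03ff and base 0x00345678 - the top byte of the dword
       read at offset 2 is masked off below. */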
7588 VBOXSTRICTRC rcStrict;
7589 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
7590 {
7591 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7592 if (rcStrict == VINF_SUCCESS)
7593 rcStrict = iemMemFetchDataU64(pIemCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7594 }
7595 else
7596 {
7597 uint32_t uTmp;
7598 if (enmOpSize == IEMMODE_32BIT)
7599 {
7600 if (IEM_GET_TARGET_CPU(pIemCpu) != IEMTARGETCPU_486)
7601 {
7602 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7603 if (rcStrict == VINF_SUCCESS)
7604 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7605 }
7606 else
7607 {
7608 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem);
7609 if (rcStrict == VINF_SUCCESS)
7610 {
7611 *pcbLimit = (uint16_t)uTmp;
7612 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7613 }
7614 }
7615 if (rcStrict == VINF_SUCCESS)
7616 *pGCPtrBase = uTmp;
7617 }
7618 else
7619 {
7620 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7621 if (rcStrict == VINF_SUCCESS)
7622 {
7623 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7624 if (rcStrict == VINF_SUCCESS)
7625 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7626 }
7627 }
7628 }
7629 return rcStrict;
7630}
7631
7632
7633
7634/**
7635 * Stores a data byte.
7636 *
7637 * @returns Strict VBox status code.
7638 * @param pIemCpu The IEM per CPU data.
7639 * @param iSegReg The index of the segment register to use for
7640 * this access. The base and limits are checked.
7641 * @param GCPtrMem The address of the guest memory.
7642 * @param u8Value The value to store.
7643 */
7644IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7645{
7646 /* The lazy approach for now... */
7647 uint8_t *pu8Dst;
7648 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7649 if (rc == VINF_SUCCESS)
7650 {
7651 *pu8Dst = u8Value;
7652 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7653 }
7654 return rc;
7655}
7656
7657
7658/**
7659 * Stores a data word.
7660 *
7661 * @returns Strict VBox status code.
7662 * @param pIemCpu The IEM per CPU data.
7663 * @param iSegReg The index of the segment register to use for
7664 * this access. The base and limits are checked.
7665 * @param GCPtrMem The address of the guest memory.
7666 * @param u16Value The value to store.
7667 */
7668IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7669{
7670 /* The lazy approach for now... */
7671 uint16_t *pu16Dst;
7672 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7673 if (rc == VINF_SUCCESS)
7674 {
7675 *pu16Dst = u16Value;
7676 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7677 }
7678 return rc;
7679}
7680
7681
7682/**
7683 * Stores a data dword.
7684 *
7685 * @returns Strict VBox status code.
7686 * @param pIemCpu The IEM per CPU data.
7687 * @param iSegReg The index of the segment register to use for
7688 * this access. The base and limits are checked.
7689 * @param GCPtrMem The address of the guest memory.
7690 * @param u32Value The value to store.
7691 */
7692IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7693{
7694 /* The lazy approach for now... */
7695 uint32_t *pu32Dst;
7696 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7697 if (rc == VINF_SUCCESS)
7698 {
7699 *pu32Dst = u32Value;
7700 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7701 }
7702 return rc;
7703}
7704
7705
7706/**
7707 * Stores a data qword.
7708 *
7709 * @returns Strict VBox status code.
7710 * @param pIemCpu The IEM per CPU data.
7711 * @param iSegReg The index of the segment register to use for
7712 * this access. The base and limits are checked.
7713 * @param GCPtrMem The address of the guest memory.
7714 * @param u64Value The value to store.
7715 */
7716IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7717{
7718 /* The lazy approach for now... */
7719 uint64_t *pu64Dst;
7720 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7721 if (rc == VINF_SUCCESS)
7722 {
7723 *pu64Dst = u64Value;
7724 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7725 }
7726 return rc;
7727}
7728
7729
7730/**
7731 * Stores a data dqword.
7732 *
7733 * @returns Strict VBox status code.
7734 * @param pIemCpu The IEM per CPU data.
7735 * @param iSegReg The index of the segment register to use for
7736 * this access. The base and limits are checked.
7737 * @param GCPtrMem The address of the guest memory.
7738 * @param u128Value The value to store.
7739 */
7740IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7741{
7742 /* The lazy approach for now... */
7743 uint128_t *pu128Dst;
7744 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7745 if (rc == VINF_SUCCESS)
7746 {
7747 *pu128Dst = u128Value;
7748 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7749 }
7750 return rc;
7751}
7752
7753
7754/**
7755 * Stores a data dqword, SSE aligned.
7756 *
7757 * @returns Strict VBox status code.
7758 * @param pIemCpu The IEM per CPU data.
7759 * @param iSegReg The index of the segment register to use for
7760 * this access. The base and limits are checked.
7761 * @param GCPtrMem The address of the guest memory.
7762 * @param u128Value The value to store.
7763 */
7764IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7765{
7766 /* The lazy approach for now... */
7767 if ( (GCPtrMem & 15)
7768 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7769 return iemRaiseGeneralProtectionFault0(pIemCpu);
7770
7771 uint128_t *pu128Dst;
7772 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7773 if (rc == VINF_SUCCESS)
7774 {
7775 *pu128Dst = u128Value;
7776 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7777 }
7778 return rc;
7779}
7780
7781
7782/**
7783 * Stores a descriptor register (sgdt, sidt).
7784 *
7785 * @returns Strict VBox status code.
7786 * @param pIemCpu The IEM per CPU data.
7787 * @param cbLimit The limit.
7788 * @param GCPtrBase The base address.
7789 * @param iSegReg The index of the segment register to use for
7790 * this access. The base and limits are checked.
7791 * @param GCPtrMem The address of the guest memory.
7792 */
7793IEM_STATIC VBOXSTRICTRC
7794iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
7795{
7796 /*
7797     * The SIDT and SGDT instructions actually store the data using two
7798     * independent writes. The instructions do not respond to opsize prefixes.
7799 */
7800 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pIemCpu, iSegReg, GCPtrMem, cbLimit);
7801 if (rcStrict == VINF_SUCCESS)
7802 {
7803 if (pIemCpu->enmCpuMode == IEMMODE_16BIT)
7804 rcStrict = iemMemStoreDataU32(pIemCpu, iSegReg, GCPtrMem + 2,
7805 IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_286
7806 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7807 else if (pIemCpu->enmCpuMode == IEMMODE_32BIT)
7808 rcStrict = iemMemStoreDataU32(pIemCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7809 else
7810 rcStrict = iemMemStoreDataU64(pIemCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7811 }
7812 return rcStrict;
7813}
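
/*
 * Worked example (illustrative): in 16-bit mode a base of 0x00123456 is
 * stored at GCPtrMem+2 as the dword 0xff123456 when targeting a 286 or
 * older CPU and as 0x00123456 on later targets, while the limit word is
 * always written to GCPtrMem unmodified.
 */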
7814
7815
7816/**
7817 * Pushes a word onto the stack.
7818 *
7819 * @returns Strict VBox status code.
7820 * @param pIemCpu The IEM per CPU data.
7821 * @param u16Value The value to push.
7822 */
7823IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7824{
7825    /* Decrement the stack pointer. */
7826 uint64_t uNewRsp;
7827 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7828 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7829
7830 /* Write the word the lazy way. */
7831 uint16_t *pu16Dst;
7832 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7833 if (rc == VINF_SUCCESS)
7834 {
7835 *pu16Dst = u16Value;
7836 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7837 }
7838
7839    /* Commit the new RSP value unless an access handler made trouble. */
7840 if (rc == VINF_SUCCESS)
7841 pCtx->rsp = uNewRsp;
7842
7843 return rc;
7844}
7845
7846
7847/**
7848 * Pushes a dword onto the stack.
7849 *
7850 * @returns Strict VBox status code.
7851 * @param pIemCpu The IEM per CPU data.
7852 * @param u32Value The value to push.
7853 */
7854IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7855{
7856    /* Decrement the stack pointer. */
7857 uint64_t uNewRsp;
7858 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7859 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7860
7861 /* Write the dword the lazy way. */
7862 uint32_t *pu32Dst;
7863 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7864 if (rc == VINF_SUCCESS)
7865 {
7866 *pu32Dst = u32Value;
7867 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7868 }
7869
7870    /* Commit the new RSP value unless an access handler made trouble. */
7871 if (rc == VINF_SUCCESS)
7872 pCtx->rsp = uNewRsp;
7873
7874 return rc;
7875}
7876
7877
7878/**
7879 * Pushes a dword segment register value onto the stack.
7880 *
7881 * @returns Strict VBox status code.
7882 * @param pIemCpu The IEM per CPU data.
7883 * @param u32Value The value to push.
7884 */
7885IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7886{
7887    /* Decrement the stack pointer. */
7888 uint64_t uNewRsp;
7889 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7890 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7891
7892 VBOXSTRICTRC rc;
7893 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7894 {
7895 /* The recompiler writes a full dword. */
7896 uint32_t *pu32Dst;
7897 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7898 if (rc == VINF_SUCCESS)
7899 {
7900 *pu32Dst = u32Value;
7901 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7902 }
7903 }
7904 else
7905 {
7906        /* The Intel docs talk about zero extending the selector register
7907           value. My actual Intel CPU here might be zero extending the value,
7908           but it still only writes the lower word... */
7909 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7910         * happens when crossing an electric page boundary, is the high word checked
7911 * for write accessibility or not? Probably it is. What about segment limits?
7912 * It appears this behavior is also shared with trap error codes.
7913 *
7914         * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
7915         * on ancient hardware to find out when it actually changed. */
7916 uint16_t *pu16Dst;
7917 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7918 if (rc == VINF_SUCCESS)
7919 {
7920 *pu16Dst = (uint16_t)u32Value;
7921 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7922 }
7923 }
7924
7925    /* Commit the new RSP value unless an access handler made trouble. */
7926 if (rc == VINF_SUCCESS)
7927 pCtx->rsp = uNewRsp;
7928
7929 return rc;
7930}
7931
7932
7933/**
7934 * Pushes a qword onto the stack.
7935 *
7936 * @returns Strict VBox status code.
7937 * @param pIemCpu The IEM per CPU data.
7938 * @param u64Value The value to push.
7939 */
7940IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7941{
7942    /* Decrement the stack pointer. */
7943 uint64_t uNewRsp;
7944 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7945 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7946
7947    /* Write the qword the lazy way. */
7948 uint64_t *pu64Dst;
7949 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7950 if (rc == VINF_SUCCESS)
7951 {
7952 *pu64Dst = u64Value;
7953 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7954 }
7955
7956    /* Commit the new RSP value unless an access handler made trouble. */
7957 if (rc == VINF_SUCCESS)
7958 pCtx->rsp = uNewRsp;
7959
7960 return rc;
7961}
7962
7963
7964/**
7965 * Pops a word from the stack.
7966 *
7967 * @returns Strict VBox status code.
7968 * @param pIemCpu The IEM per CPU data.
7969 * @param pu16Value Where to store the popped value.
7970 */
7971IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7972{
7973 /* Increment the stack pointer. */
7974 uint64_t uNewRsp;
7975 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7976 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7977
7978    /* Read the word the lazy way. */
7979 uint16_t const *pu16Src;
7980 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7981 if (rc == VINF_SUCCESS)
7982 {
7983 *pu16Value = *pu16Src;
7984 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7985
7986 /* Commit the new RSP value. */
7987 if (rc == VINF_SUCCESS)
7988 pCtx->rsp = uNewRsp;
7989 }
7990
7991 return rc;
7992}
7993
7994
7995/**
7996 * Pops a dword from the stack.
7997 *
7998 * @returns Strict VBox status code.
7999 * @param pIemCpu The IEM per CPU data.
8000 * @param pu32Value Where to store the popped value.
8001 */
8002IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
8003{
8004 /* Increment the stack pointer. */
8005 uint64_t uNewRsp;
8006 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8007 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
8008
8009 /* Read the dword the lazy way. */
8010 uint32_t const *pu32Src;
8011 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8012 if (rc == VINF_SUCCESS)
8013 {
8014 *pu32Value = *pu32Src;
8015 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8016
8017 /* Commit the new RSP value. */
8018 if (rc == VINF_SUCCESS)
8019 pCtx->rsp = uNewRsp;
8020 }
8021
8022 return rc;
8023}
8024
8025
8026/**
8027 * Pops a qword from the stack.
8028 *
8029 * @returns Strict VBox status code.
8030 * @param pIemCpu The IEM per CPU data.
8031 * @param pu64Value Where to store the popped value.
8032 */
8033IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
8034{
8035 /* Increment the stack pointer. */
8036 uint64_t uNewRsp;
8037 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8038 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
8039
8040 /* Read the qword the lazy way. */
8041 uint64_t const *pu64Src;
8042 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8043 if (rc == VINF_SUCCESS)
8044 {
8045 *pu64Value = *pu64Src;
8046 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8047
8048 /* Commit the new RSP value. */
8049 if (rc == VINF_SUCCESS)
8050 pCtx->rsp = uNewRsp;
8051 }
8052
8053 return rc;
8054}
8055
8056
8057/**
8058 * Pushes a word onto the stack, using a temporary stack pointer.
8059 *
8060 * @returns Strict VBox status code.
8061 * @param pIemCpu The IEM per CPU data.
8062 * @param u16Value The value to push.
8063 * @param pTmpRsp Pointer to the temporary stack pointer.
8064 */
8065IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
8066{
8067 /* Decrement the stack pointer. */
8068 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8069 RTUINT64U NewRsp = *pTmpRsp;
8070 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
8071
8072 /* Write the word the lazy way. */
8073 uint16_t *pu16Dst;
8074 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8075 if (rc == VINF_SUCCESS)
8076 {
8077 *pu16Dst = u16Value;
8078 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
8079 }
8080
8081 /* Commit the new RSP value unless an access handler made trouble. */
8082 if (rc == VINF_SUCCESS)
8083 *pTmpRsp = NewRsp;
8084
8085 return rc;
8086}
8087
8088
8089/**
8090 * Pushes a dword onto the stack, using a temporary stack pointer.
8091 *
8092 * @returns Strict VBox status code.
8093 * @param pIemCpu The IEM per CPU data.
8094 * @param u32Value The value to push.
8095 * @param pTmpRsp Pointer to the temporary stack pointer.
8096 */
8097IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
8098{
8099 /* Decrement the stack pointer. */
8100 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8101 RTUINT64U NewRsp = *pTmpRsp;
8102 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
8103
8104 /* Write the dword the lazy way. */
8105 uint32_t *pu32Dst;
8106 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8107 if (rc == VINF_SUCCESS)
8108 {
8109 *pu32Dst = u32Value;
8110 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
8111 }
8112
8113 /* Commit the new RSP value unless an access handler made trouble. */
8114 if (rc == VINF_SUCCESS)
8115 *pTmpRsp = NewRsp;
8116
8117 return rc;
8118}
8119
8120
8121/**
8122 * Pushes a qword onto the stack, using a temporary stack pointer.
8123 *
8124 * @returns Strict VBox status code.
8125 * @param pIemCpu The IEM per CPU data.
8126 * @param u64Value The value to push.
8127 * @param pTmpRsp Pointer to the temporary stack pointer.
8128 */
8129IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
8130{
8131 /* Decrement the stack pointer. */
8132 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8133 RTUINT64U NewRsp = *pTmpRsp;
8134 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
8135
8136 /* Write the qword the lazy way. */
8137 uint64_t *pu64Dst;
8138 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8139 if (rc == VINF_SUCCESS)
8140 {
8141 *pu64Dst = u64Value;
8142 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
8143 }
8144
8145 /* Commit the new RSP value unless an access handler made trouble. */
8146 if (rc == VINF_SUCCESS)
8147 *pTmpRsp = NewRsp;
8148
8149 return rc;
8150}
8151
8152
8153/**
8154 * Pops a word from the stack, using a temporary stack pointer.
8155 *
8156 * @returns Strict VBox status code.
8157 * @param pIemCpu The IEM per CPU data.
8158 * @param pu16Value Where to store the popped value.
8159 * @param pTmpRsp Pointer to the temporary stack pointer.
8160 */
8161IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
8162{
8163 /* Increment the stack pointer. */
8164 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8165 RTUINT64U NewRsp = *pTmpRsp;
8166 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
8167
8168 /* Read the word the lazy way. */
8169 uint16_t const *pu16Src;
8170 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8171 if (rc == VINF_SUCCESS)
8172 {
8173 *pu16Value = *pu16Src;
8174 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8175
8176 /* Commit the new RSP value. */
8177 if (rc == VINF_SUCCESS)
8178 *pTmpRsp = NewRsp;
8179 }
8180
8181 return rc;
8182}
8183
8184
8185/**
8186 * Pops a dword from the stack, using a temporary stack pointer.
8187 *
8188 * @returns Strict VBox status code.
8189 * @param pIemCpu The IEM per CPU data.
8190 * @param pu32Value Where to store the popped value.
8191 * @param pTmpRsp Pointer to the temporary stack pointer.
8192 */
8193IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
8194{
8195 /* Increment the stack pointer. */
8196 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8197 RTUINT64U NewRsp = *pTmpRsp;
8198 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
8199
8200 /* Read the dword the lazy way. */
8201 uint32_t const *pu32Src;
8202 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8203 if (rc == VINF_SUCCESS)
8204 {
8205 *pu32Value = *pu32Src;
8206 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8207
8208 /* Commit the new RSP value. */
8209 if (rc == VINF_SUCCESS)
8210 *pTmpRsp = NewRsp;
8211 }
8212
8213 return rc;
8214}
8215
8216
8217/**
8218 * Pops a qword from the stack, using a temporary stack pointer.
8219 *
8220 * @returns Strict VBox status code.
8221 * @param pIemCpu The IEM per CPU data.
8222 * @param pu64Value Where to store the popped value.
8223 * @param pTmpRsp Pointer to the temporary stack pointer.
8224 */
8225IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
8226{
8227 /* Increment the stack pointer. */
8228 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8229 RTUINT64U NewRsp = *pTmpRsp;
8230 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8231
8232 /* Read the qword the lazy way. */
8233 uint64_t const *pu64Src;
8234 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8235 if (rcStrict == VINF_SUCCESS)
8236 {
8237 *pu64Value = *pu64Src;
8238 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8239
8240 /* Commit the new RSP value. */
8241 if (rcStrict == VINF_SUCCESS)
8242 *pTmpRsp = NewRsp;
8243 }
8244
8245 return rcStrict;
8246}
8247
8248
8249/**
8250 * Begin a special stack push (used by interrupts, exceptions and such).
8251 *
8252 * This will raise \#SS or \#PF if appropriate.
8253 *
8254 * @returns Strict VBox status code.
8255 * @param pIemCpu The IEM per CPU data.
8256 * @param cbMem The number of bytes to push onto the stack.
8257 * @param ppvMem Where to return the pointer to the stack memory.
8258 * As with the other memory functions this could be
8259 * direct access or bounce buffered access, so
8260 * don't commit the register until the commit call
8261 * succeeds.
8262 * @param puNewRsp Where to return the new RSP value. This must be
8263 * passed unchanged to
8264 * iemMemStackPushCommitSpecial().
8265 */
8266IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8267{
8268 Assert(cbMem < UINT8_MAX);
8269 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8270 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8271 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8272}
8273
8274
8275/**
8276 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8277 *
8278 * This will update the rSP.
8279 *
8280 * @returns Strict VBox status code.
8281 * @param pIemCpu The IEM per CPU data.
8282 * @param pvMem The pointer returned by
8283 * iemMemStackPushBeginSpecial().
8284 * @param uNewRsp The new RSP value returned by
8285 * iemMemStackPushBeginSpecial().
8286 */
8287IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8288{
8289 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8290 if (rcStrict == VINF_SUCCESS)
8291 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8292 return rcStrict;
8293}
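
#if 0 /* Illustrative sketch only, not part of the original file: how a caller
       * might use the special push API above to write a small, made-up three
       * dword frame.  The frame contents (uErr, uRetEip, uRetEflags) are
       * hypothetical names used purely for the example. */
{
    void        *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 3 * sizeof(uint32_t), &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint32_t *pau32Frame = (uint32_t *)pvFrame;
    pau32Frame[0] = uErr;        /* hypothetical error code */
    pau32Frame[1] = uRetEip;     /* hypothetical return EIP */
    pau32Frame[2] = uRetEflags;  /* hypothetical saved EFLAGS */

    /* Note: RSP is only committed by the commit call, as required above. */
    rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvFrame, uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
}
#endif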
8294
8295
8296/**
8297 * Begin a special stack pop (used by iret, retf and such).
8298 *
8299 * This will raise \#SS or \#PF if appropriate.
8300 *
8301 * @returns Strict VBox status code.
8302 * @param pIemCpu The IEM per CPU data.
8303 * @param cbMem The number of bytes to pop off the stack.
8304 * @param ppvMem Where to return the pointer to the stack memory.
8305 * @param puNewRsp Where to return the new RSP value. This must be
8306 * passed unchanged to
8307 * iemMemStackPopCommitSpecial() or applied
8308 * manually if iemMemStackPopDoneSpecial() is used.
8309 */
8310IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8311{
8312 Assert(cbMem < UINT8_MAX);
8313 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8314 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8315 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8316}
8317
8318
8319/**
8320 * Continue a special stack pop (used by iret and retf).
8321 *
8322 * This will raise \#SS or \#PF if appropriate.
8323 *
8324 * @returns Strict VBox status code.
8325 * @param pIemCpu The IEM per CPU data.
8326 * @param cbMem The number of bytes to pop off the stack.
8327 * @param ppvMem Where to return the pointer to the stack memory.
8328 * @param puNewRsp Where to return the new RSP value. This must be
8329 * passed unchanged to
8330 * iemMemStackPopCommitSpecial() or applied
8331 * manually if iemMemStackPopDoneSpecial() is used.
8332 */
8333IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8334{
8335 Assert(cbMem < UINT8_MAX);
8336 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8337 RTUINT64U NewRsp;
8338 NewRsp.u = *puNewRsp;
8339 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8340 *puNewRsp = NewRsp.u;
8341 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8342}
8343
8344
8345/**
8346 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8347 *
8348 * This will update the rSP.
8349 *
8350 * @returns Strict VBox status code.
8351 * @param pIemCpu The IEM per CPU data.
8352 * @param pvMem The pointer returned by
8353 * iemMemStackPopBeginSpecial().
8354 * @param uNewRsp The new RSP value returned by
8355 * iemMemStackPopBeginSpecial().
8356 */
8357IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8358{
8359 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8360 if (rcStrict == VINF_SUCCESS)
8361 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8362 return rcStrict;
8363}
8364
8365
8366/**
8367 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8368 * iemMemStackPopContinueSpecial).
8369 *
8370 * The caller will manually commit the rSP.
8371 *
8372 * @returns Strict VBox status code.
8373 * @param pIemCpu The IEM per CPU data.
8374 * @param pvMem The pointer returned by
8375 * iemMemStackPopBeginSpecial() or
8376 * iemMemStackPopContinueSpecial().
8377 */
8378IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8379{
8380 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8381}
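
#if 0 /* Illustrative sketch only, not part of the original file: how an
       * iret-style caller might use the special pop API above.  The three
       * dword frame layout and the local names are hypothetical, chosen
       * purely for the example. */
{
    void const  *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 3 * sizeof(uint32_t), &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint32_t const *pau32Frame = (uint32_t const *)pvFrame;
    uint32_t const  uNewEip    = pau32Frame[0];
    uint32_t const  uNewCs     = pau32Frame[1];
    uint32_t const  uNewEflags = pau32Frame[2];

    rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pvFrame);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Only after everything that can fail has succeeded does the caller
       commit rSP manually, as described above. */
    pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
}
#endif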
8382
8383
8384/**
8385 * Fetches a system table byte.
8386 *
8387 * @returns Strict VBox status code.
8388 * @param pIemCpu The IEM per CPU data.
8389 * @param pbDst Where to return the byte.
8390 * @param iSegReg The index of the segment register to use for
8391 * this access. The base and limits are checked.
8392 * @param GCPtrMem The address of the guest memory.
8393 */
8394IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8395{
8396 /* The lazy approach for now... */
8397 uint8_t const *pbSrc;
8398 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8399 if (rc == VINF_SUCCESS)
8400 {
8401 *pbDst = *pbSrc;
8402 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8403 }
8404 return rc;
8405}
8406
8407
8408/**
8409 * Fetches a system table word.
8410 *
8411 * @returns Strict VBox status code.
8412 * @param pIemCpu The IEM per CPU data.
8413 * @param pu16Dst Where to return the word.
8414 * @param iSegReg The index of the segment register to use for
8415 * this access. The base and limits are checked.
8416 * @param GCPtrMem The address of the guest memory.
8417 */
8418IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8419{
8420 /* The lazy approach for now... */
8421 uint16_t const *pu16Src;
8422 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8423 if (rc == VINF_SUCCESS)
8424 {
8425 *pu16Dst = *pu16Src;
8426 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8427 }
8428 return rc;
8429}
8430
8431
8432/**
8433 * Fetches a system table dword.
8434 *
8435 * @returns Strict VBox status code.
8436 * @param pIemCpu The IEM per CPU data.
8437 * @param pu32Dst Where to return the dword.
8438 * @param iSegReg The index of the segment register to use for
8439 * this access. The base and limits are checked.
8440 * @param GCPtrMem The address of the guest memory.
8441 */
8442IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8443{
8444 /* The lazy approach for now... */
8445 uint32_t const *pu32Src;
8446 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8447 if (rc == VINF_SUCCESS)
8448 {
8449 *pu32Dst = *pu32Src;
8450 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8451 }
8452 return rc;
8453}
8454
8455
8456/**
8457 * Fetches a system table qword.
8458 *
8459 * @returns Strict VBox status code.
8460 * @param pIemCpu The IEM per CPU data.
8461 * @param pu64Dst Where to return the qword.
8462 * @param iSegReg The index of the segment register to use for
8463 * this access. The base and limits are checked.
8464 * @param GCPtrMem The address of the guest memory.
8465 */
8466IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8467{
8468 /* The lazy approach for now... */
8469 uint64_t const *pu64Src;
8470 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8471 if (rc == VINF_SUCCESS)
8472 {
8473 *pu64Dst = *pu64Src;
8474 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8475 }
8476 return rc;
8477}
8478
8479
8480/**
8481 * Fetches a descriptor table entry with caller specified error code.
8482 *
8483 * @returns Strict VBox status code.
8484 * @param pIemCpu The IEM per CPU.
8485 * @param pDesc Where to return the descriptor table entry.
8486 * @param uSel The selector which table entry to fetch.
8487 * @param uXcpt The exception to raise on table lookup error.
8488 * @param uErrorCode The error code associated with the exception.
8489 */
8490IEM_STATIC VBOXSTRICTRC
8491iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8492{
8493 AssertPtr(pDesc);
8494 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8495
8496 /** @todo did the 286 require all 8 bytes to be accessible? */
8497 /*
8498 * Get the selector table base and check bounds.
8499 */
8500 RTGCPTR GCPtrBase;
8501 if (uSel & X86_SEL_LDT)
8502 {
8503 if ( !pCtx->ldtr.Attr.n.u1Present
8504 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8505 {
8506 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8507 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8508 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8509 uErrorCode, 0);
8510 }
8511
8512 Assert(pCtx->ldtr.Attr.n.u1Present);
8513 GCPtrBase = pCtx->ldtr.u64Base;
8514 }
8515 else
8516 {
8517 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8518 {
8519 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8520 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8521 uErrorCode, 0);
8522 }
8523 GCPtrBase = pCtx->gdtr.pGdt;
8524 }
8525
8526 /*
8527 * Read the legacy descriptor and maybe the long mode extensions if
8528 * required.
8529 */
8530 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8531 if (rcStrict == VINF_SUCCESS)
8532 {
8533 if ( !IEM_IS_LONG_MODE(pIemCpu)
8534 || pDesc->Legacy.Gen.u1DescType)
8535 pDesc->Long.au64[1] = 0;
8536 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8537 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8538 else
8539 {
8540 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8541 /** @todo is this the right exception? */
8542 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8543 }
8544 }
8545 return rcStrict;
8546}
8547
8548
8549/**
8550 * Fetches a descriptor table entry.
8551 *
8552 * @returns Strict VBox status code.
8553 * @param pIemCpu The IEM per CPU.
8554 * @param pDesc Where to return the descriptor table entry.
8555 * @param uSel The selector which table entry to fetch.
8556 * @param uXcpt The exception to raise on table lookup error.
8557 */
8558IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8559{
8560 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8561}
8562
8563
8564/**
8565 * Fakes a long mode stack selector for SS = 0.
8566 *
8567 * @param pDescSs Where to return the fake stack descriptor.
8568 * @param uDpl The DPL we want.
8569 */
8570IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8571{
8572 pDescSs->Long.au64[0] = 0;
8573 pDescSs->Long.au64[1] = 0;
8574 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8575 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8576 pDescSs->Long.Gen.u2Dpl = uDpl;
8577 pDescSs->Long.Gen.u1Present = 1;
8578 pDescSs->Long.Gen.u1Long = 1;
8579}
8580
8581
8582/**
8583 * Marks the selector descriptor as accessed (only non-system descriptors).
8584 *
8585 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8586 * will therefore skip the limit checks.
8587 *
8588 * @returns Strict VBox status code.
8589 * @param pIemCpu The IEM per CPU.
8590 * @param uSel The selector.
8591 */
8592IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8593{
8594 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8595
8596 /*
8597 * Get the selector table base and calculate the entry address.
8598 */
8599 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8600 ? pCtx->ldtr.u64Base
8601 : pCtx->gdtr.pGdt;
8602 GCPtr += uSel & X86_SEL_MASK;
8603
8604 /*
8605 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8606 * ugly stuff to avoid this. This will make sure it's an atomic access
8607 * and will more or less remove any question about 8-bit or 32-bit accesses.
8608 */
8609 VBOXSTRICTRC rcStrict;
8610 uint32_t volatile *pu32;
8611 if ((GCPtr & 3) == 0)
8612 {
8613 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8614 GCPtr += 2 + 2;
8615 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8616 if (rcStrict != VINF_SUCCESS)
8617 return rcStrict;
8618 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8619 }
8620 else
8621 {
8622 /* The misaligned GDT/LDT case, map the whole thing. */
8623 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8624 if (rcStrict != VINF_SUCCESS)
8625 return rcStrict;
8626 switch ((uintptr_t)pu32 & 3)
8627 {
8628 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8629 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8630 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8631 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8632 }
8633 }
8634
8635 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8636}
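
/* Added worked example (not from the original source) of the bit arithmetic
   above: the accessed flag is bit 40 of the 8-byte descriptor.  In the
   aligned case the code maps the dword at byte offset 4, where the flag is
   bit 40 - 32 = 8.  In the misaligned cases the whole descriptor is mapped;
   e.g. when the returned pointer is 1 byte past a dword boundary, the code
   steps 3 bytes forward (back to an aligned address) and sets bit
   40 - 3*8 = 16 relative to that address, which is again descriptor bit 40. */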
8637
8638/** @} */
8639
8640
8641/*
8642 * Include the C/C++ implementation of instruction.
8643 */
8644#include "IEMAllCImpl.cpp.h"
8645
8646
8647
8648/** @name "Microcode" macros.
8649 *
8650 * The idea is that we should be able to use the same code to interpret
8651 * instructions as well as to recompile them. Thus this obfuscation.
8652 *
8653 * @{
8654 */
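
#if 0 /* Illustrative sketch only, not part of the original file: roughly what
       * a register-to-register 16-bit ADD body looks like when written with
       * the microcode macros below.  The register indices (iGRegDst, iGRegSrc)
       * and the assembly worker name (iemAImpl_add_u16) are assumptions made
       * for the example. */
IEM_MC_BEGIN(3, 0);
IEM_MC_ARG(uint16_t *, pu16Dst, 0);
IEM_MC_ARG(uint16_t, u16Src, 1);
IEM_MC_ARG(uint32_t *, pEFlags, 2);
IEM_MC_FETCH_GREG_U16(u16Src, iGRegSrc);
IEM_MC_REF_GREG_U16(pu16Dst, iGRegDst);
IEM_MC_REF_EFLAGS(pEFlags);
IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif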
8655#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8656#define IEM_MC_END() }
8657#define IEM_MC_PAUSE() do {} while (0)
8658#define IEM_MC_CONTINUE() do {} while (0)
8659
8660/** Internal macro. */
8661#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8662 do \
8663 { \
8664 VBOXSTRICTRC rcStrict2 = a_Expr; \
8665 if (rcStrict2 != VINF_SUCCESS) \
8666 return rcStrict2; \
8667 } while (0)
8668
8669#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8670#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8671#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8672#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8673#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8674#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8675#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8676
8677#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8678#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8679 do { \
8680 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8681 return iemRaiseDeviceNotAvailable(pIemCpu); \
8682 } while (0)
8683#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8684 do { \
8685 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8686 return iemRaiseMathFault(pIemCpu); \
8687 } while (0)
8688#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8689 do { \
8690 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8691 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8692 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8693 return iemRaiseUndefinedOpcode(pIemCpu); \
8694 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8695 return iemRaiseDeviceNotAvailable(pIemCpu); \
8696 } while (0)
8697#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
8698 do { \
8699 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8700 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8701 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse) \
8702 return iemRaiseUndefinedOpcode(pIemCpu); \
8703 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8704 return iemRaiseDeviceNotAvailable(pIemCpu); \
8705 } while (0)
8706#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8707 do { \
8708 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8709 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8710 return iemRaiseUndefinedOpcode(pIemCpu); \
8711 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8712 return iemRaiseDeviceNotAvailable(pIemCpu); \
8713 } while (0)
8714#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8715 do { \
8716 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8717 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8718 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8719 return iemRaiseUndefinedOpcode(pIemCpu); \
8720 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8721 return iemRaiseDeviceNotAvailable(pIemCpu); \
8722 } while (0)
8723#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8724 do { \
8725 if (pIemCpu->uCpl != 0) \
8726 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8727 } while (0)
8728
8729
8730#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8731#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8732#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8733#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8734#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8735#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8736#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8737 uint32_t a_Name; \
8738 uint32_t *a_pName = &a_Name
8739#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8740 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8741
8742#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8743#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8744
8745#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8746#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8747#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8748#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8749#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8750#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8751#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8752#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8753#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8754#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8755#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8756#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8757#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8758#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8759#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8760#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8761#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8762#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8763#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8764#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8765#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8766#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8767#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8768#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8769#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8770#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8771#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8772#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8773#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8774/** @note Not for IOPL or IF testing or modification. */
8775#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8776#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8777#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8778#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8779
8780#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8781#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8782#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8783#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8784#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8785#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8786#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8787#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8788#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8789#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8790#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8791 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8792
8793#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8794#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8795/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8796 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8797#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8798#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8799/** @note Not for IOPL or IF testing or modification. */
8800#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8801
8802#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8803#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8804#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8805 do { \
8806 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8807 *pu32Reg += (a_u32Value); \
8808 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8809 } while (0)
8810#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8811
8812#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8813#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8814#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8815 do { \
8816 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8817 *pu32Reg -= (a_u32Value); \
8818 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8819 } while (0)
8820#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8821#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
8822
8823#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8824#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8825#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8826#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8827#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8828#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8829#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8830
8831#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8832#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8833#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8834#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8835
8836#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8837#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8838#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8839
8840#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8841#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
8842#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8843
8844#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8845#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8846#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8847
8848#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8849#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8850#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8851
8852#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8853
8854#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8855
8856#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8857#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8858#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8859 do { \
8860 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8861 *pu32Reg &= (a_u32Value); \
8862 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8863 } while (0)
8864#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8865
8866#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8867#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8868#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8869 do { \
8870 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8871 *pu32Reg |= (a_u32Value); \
8872 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8873 } while (0)
8874#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8875
8876
8877/** @note Not for IOPL or IF modification. */
8878#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8879/** @note Not for IOPL or IF modification. */
8880#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8881/** @note Not for IOPL or IF modification. */
8882#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8883
8884#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8885
8886
8887#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8888 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8889#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8890 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8891#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8892 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8893#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8894 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8895#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8896 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8897#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8898 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8899#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8900 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8901
8902#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8903 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8904#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8905 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8906#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8907 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8908#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8909 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8910#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
8911 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
8912#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8913 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8914 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8915 } while (0)
8916#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8917 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8918 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8919 } while (0)
8920#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8921 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8922#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8923 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8924#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8925 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8926#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
8927 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
8928 = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
8929
8930#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8931 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8932#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8933 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8934#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8935 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8936
8937#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8938 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8939#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8940 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8941#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8942 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8943
8944#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8945 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8946#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8947 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8948#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8949 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8950
8951#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8952 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8953
8954#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8955 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8956#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8957 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8958#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8959 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8960#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8961 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8962
8963#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8964 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8965#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8966 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8967#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8968 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8969
8970#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8971 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8972#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8973 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8974
8975
8976
8977#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8978 do { \
8979 uint8_t u8Tmp; \
8980 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8981 (a_u16Dst) = u8Tmp; \
8982 } while (0)
8983#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8984 do { \
8985 uint8_t u8Tmp; \
8986 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8987 (a_u32Dst) = u8Tmp; \
8988 } while (0)
8989#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8990 do { \
8991 uint8_t u8Tmp; \
8992 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8993 (a_u64Dst) = u8Tmp; \
8994 } while (0)
8995#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8996 do { \
8997 uint16_t u16Tmp; \
8998 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8999 (a_u32Dst) = u16Tmp; \
9000 } while (0)
9001#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
9002 do { \
9003 uint16_t u16Tmp; \
9004 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
9005 (a_u64Dst) = u16Tmp; \
9006 } while (0)
9007#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
9008 do { \
9009 uint32_t u32Tmp; \
9010 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
9011 (a_u64Dst) = u32Tmp; \
9012 } while (0)
9013
9014#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
9015 do { \
9016 uint8_t u8Tmp; \
9017 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
9018 (a_u16Dst) = (int8_t)u8Tmp; \
9019 } while (0)
9020#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
9021 do { \
9022 uint8_t u8Tmp; \
9023 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
9024 (a_u32Dst) = (int8_t)u8Tmp; \
9025 } while (0)
9026#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
9027 do { \
9028 uint8_t u8Tmp; \
9029 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
9030 (a_u64Dst) = (int8_t)u8Tmp; \
9031 } while (0)
9032#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
9033 do { \
9034 uint16_t u16Tmp; \
9035 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
9036 (a_u32Dst) = (int16_t)u16Tmp; \
9037 } while (0)
9038#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
9039 do { \
9040 uint16_t u16Tmp; \
9041 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
9042 (a_u64Dst) = (int16_t)u16Tmp; \
9043 } while (0)
9044#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
9045 do { \
9046 uint32_t u32Tmp; \
9047 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
9048 (a_u64Dst) = (int32_t)u32Tmp; \
9049 } while (0)
9050
9051#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
9052 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
9053#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
9054 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
9055#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
9056 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
9057#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
9058 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
9059
9060#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
9061 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
9062#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
9063 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
9064#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
9065 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
9066#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
9067 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
9068
9069#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
9070#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
9071#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
9072#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
9073#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
9074#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
9075#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
9076 do { \
9077 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
9078 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
9079 } while (0)
9080
9081#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
9082 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
9083#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
9084 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
9085
9086
9087#define IEM_MC_PUSH_U16(a_u16Value) \
9088 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
9089#define IEM_MC_PUSH_U32(a_u32Value) \
9090 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
9091#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
9092 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
9093#define IEM_MC_PUSH_U64(a_u64Value) \
9094 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
9095
9096#define IEM_MC_POP_U16(a_pu16Value) \
9097 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
9098#define IEM_MC_POP_U32(a_pu32Value) \
9099 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
9100#define IEM_MC_POP_U64(a_pu64Value) \
9101 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
9102
9103/** Maps guest memory for direct or bounce buffered access.
9104 * The purpose is to pass it to an operand implementation, thus the a_iArg.
9105 * @remarks May return.
9106 */
9107#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
9108 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
9109
9110/** Maps guest memory for direct or bounce buffered access.
9111 * The purpose is to pass it to an operand implementation, thus the a_iArg.
9112 * @remarks May return.
9113 */
9114#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
9115 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
9116
9117/** Commits the memory and unmaps the guest memory.
9118 * @remarks May return.
9119 */
9120#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
9121 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
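
#if 0 /* Illustrative sketch only, not part of the original file: the typical
       * map / modify / commit pattern for a read-modify-write memory operand.
       * The ModR/M byte (bRm), source register index (iGRegSrc) and worker
       * name (iemAImpl_or_u32) are assumptions made for the example. */
IEM_MC_BEGIN(3, 2);
IEM_MC_ARG(uint32_t *, pu32Dst, 0);
IEM_MC_ARG(uint32_t, u32Src, 1);
IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U32(u32Src, iGRegSrc);
IEM_MC_FETCH_EFLAGS(EFlags);
IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_or_u32, pu32Dst, u32Src, pEFlags);
IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
IEM_MC_COMMIT_EFLAGS(EFlags);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif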
9122
9123/** Commits the memory and unmaps the guest memory unless the FPU status word
9124 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
9125 * would cause the store not to take place.
9126 *
9127 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
9128 * store, while \#P will not.
9129 *
9130 * @remarks May in theory return - for now.
9131 */
9132#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
9133 do { \
9134 if ( !(a_u16FSW & X86_FSW_ES) \
9135 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
9136 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
9137 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
9138 } while (0)
9139
9140/** Calculate efficient address from R/M. */
9141#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
9142 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
9143
9144#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
9145#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
9146#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
9147#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
9148#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
9149#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
9150#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
9151
9152/**
9153 * Defers the rest of the instruction emulation to a C implementation routine
9154 * and returns, only taking the standard parameters.
9155 *
9156 * @param a_pfnCImpl The pointer to the C routine.
9157 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
9158 */
9159#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
9160
9161/**
9162 * Defers the rest of instruction emulation to a C implementation routine and
9163 * returns, taking one argument in addition to the standard ones.
9164 *
9165 * @param a_pfnCImpl The pointer to the C routine.
9166 * @param a0 The argument.
9167 */
9168#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
9169
9170/**
9171 * Defers the rest of the instruction emulation to a C implementation routine
9172 * and returns, taking two arguments in addition to the standard ones.
9173 *
9174 * @param a_pfnCImpl The pointer to the C routine.
9175 * @param a0 The first extra argument.
9176 * @param a1 The second extra argument.
9177 */
9178#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
9179
9180/**
9181 * Defers the rest of the instruction emulation to a C implementation routine
9182 * and returns, taking three arguments in addition to the standard ones.
9183 *
9184 * @param a_pfnCImpl The pointer to the C routine.
9185 * @param a0 The first extra argument.
9186 * @param a1 The second extra argument.
9187 * @param a2 The third extra argument.
9188 */
9189#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9190
9191/**
9192 * Defers the rest of the instruction emulation to a C implementation routine
9193 * and returns, taking four arguments in addition to the standard ones.
9194 *
9195 * @param a_pfnCImpl The pointer to the C routine.
9196 * @param a0 The first extra argument.
9197 * @param a1 The second extra argument.
9198 * @param a2 The third extra argument.
9199 * @param a3 The fourth extra argument.
9200 */
9201#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
9202
9203/**
9204 * Defers the rest of the instruction emulation to a C implementation routine
9205 * and returns, taking five arguments in addition to the standard ones.
9206 *
9207 * @param a_pfnCImpl The pointer to the C routine.
9208 * @param a0 The first extra argument.
9209 * @param a1 The second extra argument.
9210 * @param a2 The third extra argument.
9211 * @param a3 The fourth extra argument.
9212 * @param a4 The fifth extra argument.
9213 */
9214#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
9215
9216/**
9217 * Defers the entire instruction emulation to a C implementation routine and
9218 * returns, only taking the standard parameters.
9219 *
9220 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
9221 *
9222 * @param a_pfnCImpl The pointer to the C routine.
9223 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
9224 */
9225#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
9226
9227/**
9228 * Defers the entire instruction emulation to a C implementation routine and
9229 * returns, taking one argument in addition to the standard ones.
9230 *
9231 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9232 *
9233 * @param a_pfnCImpl The pointer to the C routine.
9234 * @param a0 The argument.
9235 */
9236#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
9237
9238/**
9239 * Defers the entire instruction emulation to a C implementation routine and
9240 * returns, taking two arguments in addition to the standard ones.
9241 *
9242 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9243 *
9244 * @param a_pfnCImpl The pointer to the C routine.
9245 * @param a0 The first extra argument.
9246 * @param a1 The second extra argument.
9247 */
9248#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
9249
9250/**
9251 * Defers the entire instruction emulation to a C implementation routine and
9252 * returns, taking three arguments in addition to the standard ones.
9253 *
9254 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9255 *
9256 * @param a_pfnCImpl The pointer to the C routine.
9257 * @param a0 The first extra argument.
9258 * @param a1 The second extra argument.
9259 * @param a2 The third extra argument.
9260 */
9261#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9262
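/*
 * Illustrative sketch (kept under #if 0, not part of the build): the two ways
 * the macros above hand work to a C implementation routine.  The
 * iemCImpl_hypothetical_* names and the GCPtrEffSrc local are made up for the
 * example; real opcode functions in IEMAllInstructions.cpp.h follow the same
 * shape.
 */
#if 0
/* 1) Deferring the whole instruction; no IEM_MC_BEGIN/IEM_MC_END involved. */
return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_hypothetical_simple, pIemCpu->enmEffOpSize);

/* 2) Calling from inside a microcode block once the operands are set up;
      IEM_MC_CALL_CIMPL_2 contains the return statement itself. */
IEM_MC_BEGIN(2, 0);
IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pIemCpu->iEffSeg, 0);
IEM_MC_ARG_CONST(RTGCPTR, GCPtrEff, /*=*/ GCPtrEffSrc, 1);
IEM_MC_CALL_CIMPL_2(iemCImpl_hypothetical_mem, iEffSeg, GCPtrEff);
IEM_MC_END();
#endif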
9263/**
9264 * Calls a FPU assembly implementation taking one visible argument.
9265 *
9266 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9267 * @param a0 The first extra argument.
9268 */
9269#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
9270 do { \
9271 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9272 } while (0)
9273
9274/**
9275 * Calls a FPU assembly implementation taking two visible arguments.
9276 *
9277 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9278 * @param a0 The first extra argument.
9279 * @param a1 The second extra argument.
9280 */
9281#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9282 do { \
9283 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9284 } while (0)
9285
9286/**
9287 * Calls a FPU assembly implementation taking three visible arguments.
9288 *
9289 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9290 * @param a0 The first extra argument.
9291 * @param a1 The second extra argument.
9292 * @param a2 The third extra argument.
9293 */
9294#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9295 do { \
9296 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9297 } while (0)
9298
9299#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9300 do { \
9301 (a_FpuData).FSW = (a_FSW); \
9302 (a_FpuData).r80Result = *(a_pr80Value); \
9303 } while (0)
9304
9305/** Pushes FPU result onto the stack. */
9306#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9307 iemFpuPushResult(pIemCpu, &a_FpuData)
9308/** Pushes FPU result onto the stack and sets the FPUDP. */
9309#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9310 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9311
9312/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
9313#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9314 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9315
9316/** Stores FPU result in a stack register. */
9317#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9318 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9319/** Stores FPU result in a stack register and pops the stack. */
9320#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9321 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9322/** Stores FPU result in a stack register and sets the FPUDP. */
9323#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9324 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9325/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9326 * stack. */
9327#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9328 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9329
9330/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9331#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9332 iemFpuUpdateOpcodeAndIp(pIemCpu)
9333/** Free a stack register (for FFREE and FFREEP). */
9334#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9335 iemFpuStackFree(pIemCpu, a_iStReg)
9336/** Increment the FPU stack pointer. */
9337#define IEM_MC_FPU_STACK_INC_TOP() \
9338 iemFpuStackIncTop(pIemCpu)
9339/** Decrement the FPU stack pointer. */
9340#define IEM_MC_FPU_STACK_DEC_TOP() \
9341 iemFpuStackDecTop(pIemCpu)
9342
9343/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9344#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9345 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9346/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9347#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9348 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9349/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9350#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9351 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9352/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9353#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9354 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9355/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9356 * stack. */
9357#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9358 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9359/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9360#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9361 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9362
9363/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9364#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9365 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9366/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9367 * stack. */
9368#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9369 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9370/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9371 * FPUDS. */
9372#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9373 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9374/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9375 * FPUDS. Pops stack. */
9376#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9377 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9378/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9379 * stack twice. */
9380#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9381 iemFpuStackUnderflowThenPopPop(pIemCpu)
9382/** Raises a FPU stack underflow exception for an instruction pushing a result
9383 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9384#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9385 iemFpuStackPushUnderflow(pIemCpu)
9386/** Raises a FPU stack underflow exception for an instruction pushing a result
9387 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9388#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9389 iemFpuStackPushUnderflowTwo(pIemCpu)
9390
9391/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9392 * FPUIP, FPUCS and FOP. */
9393#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9394 iemFpuStackPushOverflow(pIemCpu)
9395/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9396 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9397#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9398 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
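/*
 * Illustrative sketch (under #if 0, not part of the build): how a push-type
 * FPU instruction body typically combines the call, result and stack
 * over-/underflow macros above.  The worker name iemAImpl_hypothetical_const
 * is made up; the IEM_MC_IF / IEM_MC_ELSE / IEM_MC_ENDIF blocks used here are
 * defined further down in this file.
 */
#if 0
IEM_MC_BEGIN(1, 1);
IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
IEM_MC_ARG(PIEMFPURESULT, pFpuRes, 0);
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
IEM_MC_PREPARE_FPU_USAGE();
IEM_MC_REF_LOCAL(pFpuRes, FpuRes);
IEM_MC_IF_FPUREG_IS_EMPTY(7)                /* ST7 free, so the push cannot overflow */
    IEM_MC_CALL_FPU_AIMPL_1(iemAImpl_hypothetical_const, pFpuRes);
    IEM_MC_PUSH_FPU_RESULT(FpuRes);
IEM_MC_ELSE()
    IEM_MC_FPU_STACK_PUSH_OVERFLOW();
IEM_MC_ENDIF();
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif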
9399/** Prepares for using the FPU state.
9400 * Ensures that we can use the host FPU in the current context (RC+R0).
9401 * Ensures the guest FPU state in the CPUMCTX is up to date. */
9402#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pIemCpu)
9403/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
9404#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pIemCpu)
9405/** Actualizes the guest FPU state so it can be accessed and modified. */
9406#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pIemCpu)
9407
9408/** Prepares for using the SSE state.
9409 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
9410 * Ensures the guest SSE state in the CPUMCTX is up to date. */
9411#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pIemCpu)
9412/** Actualizes the guest XMM0..15 register state for read-only access. */
9413#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pIemCpu)
9414/** Actualizes the guest XMM0..15 register state for read-write access. */
9415#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pIemCpu)
9416
9417/**
9418 * Calls a MMX assembly implementation taking two visible arguments.
9419 *
9420 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9421 * @param a0 The first extra argument.
9422 * @param a1 The second extra argument.
9423 */
9424#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9425 do { \
9426 IEM_MC_PREPARE_FPU_USAGE(); \
9427 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9428 } while (0)
9429
9430/**
9431 * Calls a MMX assembly implementation taking three visible arguments.
9432 *
9433 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9434 * @param a0 The first extra argument.
9435 * @param a1 The second extra argument.
9436 * @param a2 The third extra argument.
9437 */
9438#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9439 do { \
9440 IEM_MC_PREPARE_FPU_USAGE(); \
9441 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9442 } while (0)
9443
9444
9445/**
9446 * Calls a SSE assembly implementation taking two visible arguments.
9447 *
9448 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9449 * @param a0 The first extra argument.
9450 * @param a1 The second extra argument.
9451 */
9452#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9453 do { \
9454 IEM_MC_PREPARE_SSE_USAGE(); \
9455 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9456 } while (0)
9457
9458/**
9459 * Calls a SSE assembly implementation taking three visible arguments.
9460 *
9461 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9462 * @param a0 The first extra argument.
9463 * @param a1 The second extra argument.
9464 * @param a2 The third extra argument.
9465 */
9466#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9467 do { \
9468 IEM_MC_PREPARE_SSE_USAGE(); \
9469 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9470 } while (0)
9471
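/*
 * Illustrative sketch (under #if 0, not part of the build): the register form
 * of a hypothetical full-width SSE instruction using IEM_MC_CALL_SSE_AIMPL_2.
 * The worker name iemAImpl_hypothetical_u128 is invented; bRm is assumed to
 * hold the already fetched ModR/M byte.
 */
#if 0
IEM_MC_BEGIN(2, 0);
IEM_MC_ARG(uint128_t *, pDst, 0);
IEM_MC_ARG(uint128_t const *, pSrc, 1);
IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_hypothetical_u128, pDst, pSrc); /* prepares SSE usage itself */
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif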
9472/** @note Not for IOPL or IF testing. */
9473#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9474/** @note Not for IOPL or IF testing. */
9475#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9476/** @note Not for IOPL or IF testing. */
9477#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9478/** @note Not for IOPL or IF testing. */
9479#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9480/** @note Not for IOPL or IF testing. */
9481#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9482 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9483 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9484/** @note Not for IOPL or IF testing. */
9485#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9486 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9487 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9488/** @note Not for IOPL or IF testing. */
9489#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9490 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9491 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9492 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9493/** @note Not for IOPL or IF testing. */
9494#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9495 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9496 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9497 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9498#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9499#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9500#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9501/** @note Not for IOPL or IF testing. */
9502#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9503 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9504 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9505/** @note Not for IOPL or IF testing. */
9506#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9507 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9508 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9509/** @note Not for IOPL or IF testing. */
9510#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9511 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9512 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9513/** @note Not for IOPL or IF testing. */
9514#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9515 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9516 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9517/** @note Not for IOPL or IF testing. */
9518#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9519 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9520 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9521/** @note Not for IOPL or IF testing. */
9522#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9523 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9524 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9525#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9526#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9527
9528#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9529 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9530#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9531 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9532#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9533 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9534#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9535 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9536#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9537 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9538#define IEM_MC_IF_FCW_IM() \
9539 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9540
9541#define IEM_MC_ELSE() } else {
9542#define IEM_MC_ENDIF() } do {} while (0)
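/*
 * Illustrative sketch (under #if 0, not part of the build): the IEM_MC_IF_xxx
 * macros open a block that must be closed with IEM_MC_ENDIF, optionally with
 * an IEM_MC_ELSE in between, here a conditional branch on ZF.  i8Imm is
 * assumed to be the already fetched signed displacement.
 */
#if 0
IEM_MC_BEGIN(0, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
    IEM_MC_REL_JMP_S8(i8Imm);       /* branch taken */
IEM_MC_ELSE()
    IEM_MC_ADVANCE_RIP();           /* branch not taken */
IEM_MC_ENDIF();
IEM_MC_END();
#endif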
9543
9544/** @} */
9545
9546
9547/** @name Opcode Debug Helpers.
9548 * @{
9549 */
9550#ifdef DEBUG
9551# define IEMOP_MNEMONIC(a_szMnemonic) \
9552 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9553 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9554# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9555 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9556 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9557#else
9558# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9559# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9560#endif
9561
9562/** @} */
9563
9564
9565/** @name Opcode Helpers.
9566 * @{
9567 */
9568
9569#ifdef IN_RING3
9570# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9571 do { \
9572 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9573 else \
9574 { \
9575 DBGFSTOP(IEMCPU_TO_VM(pIemCpu)); \
9576 return IEMOP_RAISE_INVALID_OPCODE(); \
9577 } \
9578 } while (0)
9579#else
9580# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9581 do { \
9582 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9583 else return IEMOP_RAISE_INVALID_OPCODE(); \
9584 } while (0)
9585#endif
9586
9587/** The instruction requires a 186 or later. */
9588#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
9589# define IEMOP_HLP_MIN_186() do { } while (0)
9590#else
9591# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
9592#endif
9593
9594/** The instruction requires a 286 or later. */
9595#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
9596# define IEMOP_HLP_MIN_286() do { } while (0)
9597#else
9598# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
9599#endif
9600
9601/** The instruction requires a 386 or later. */
9602#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9603# define IEMOP_HLP_MIN_386() do { } while (0)
9604#else
9605# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
9606#endif
9607
9608/** The instruction requires a 386 or later if the given expression is true. */
9609#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9610# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
9611#else
9612# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
9613#endif
9614
9615/** The instruction requires a 486 or later. */
9616#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
9617# define IEMOP_HLP_MIN_486() do { } while (0)
9618#else
9619# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
9620#endif
9621
9622/** The instruction requires a Pentium (586) or later. */
9623#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
9624# define IEMOP_HLP_MIN_586() do { } while (0)
9625#else
9626# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
9627#endif
9628
9629/** The instruction requires a PentiumPro (686) or later. */
9630#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
9631# define IEMOP_HLP_MIN_686() do { } while (0)
9632#else
9633# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
9634#endif
9635
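/*
 * Illustrative sketch (under #if 0, not part of the build): an opcode function
 * typically invokes one of the IEMOP_HLP_MIN_xxx checks right after logging
 * the mnemonic, so that older target CPUs get #UD instead of the newer
 * behaviour.  Both names below are hypothetical.
 */
#if 0
FNIEMOP_DEF(iemOp_hypothetical_386_plus)
{
    IEMOP_MNEMONIC("hypothetical");
    IEMOP_HLP_MIN_386();            /* compiles to a no-op when targeting 386 or later only */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hypothetical_386_plus);
}
#endif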
9636
9637/** The instruction raises an \#UD in real and V8086 mode. */
9638#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9639 do \
9640 { \
9641 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9642 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9643 } while (0)
9644
9645/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9646 * lock prefixed.
9647 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9648#define IEMOP_HLP_NO_LOCK_PREFIX() \
9649 do \
9650 { \
9651 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9652 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9653 } while (0)
9654
9655/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9656 * 64-bit mode. */
9657#define IEMOP_HLP_NO_64BIT() \
9658 do \
9659 { \
9660 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9661 return IEMOP_RAISE_INVALID_OPCODE(); \
9662 } while (0)
9663
9664/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9665 * 64-bit mode. */
9666#define IEMOP_HLP_ONLY_64BIT() \
9667 do \
9668 { \
9669 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9670 return IEMOP_RAISE_INVALID_OPCODE(); \
9671 } while (0)
9672
9673/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9674#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9675 do \
9676 { \
9677 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9678 iemRecalEffOpSize64Default(pIemCpu); \
9679 } while (0)
9680
9681/** The instruction has 64-bit operand size if 64-bit mode. */
9682#define IEMOP_HLP_64BIT_OP_SIZE() \
9683 do \
9684 { \
9685 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9686 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9687 } while (0)
9688
9689/** Only a REX prefix immediately preceding the first opcode byte takes
9690 * effect. This macro helps ensure that, as well as logging bad guest code. */
9691#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9692 do \
9693 { \
9694 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9695 { \
9696 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9697 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9698 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9699 pIemCpu->uRexB = 0; \
9700 pIemCpu->uRexIndex = 0; \
9701 pIemCpu->uRexReg = 0; \
9702 iemRecalEffOpSize(pIemCpu); \
9703 } \
9704 } while (0)
9705
9706/**
9707 * Done decoding.
9708 */
9709#define IEMOP_HLP_DONE_DECODING() \
9710 do \
9711 { \
9712 /*nothing for now, maybe later... */ \
9713 } while (0)
9714
9715/**
9716 * Done decoding, raise \#UD exception if lock prefix present.
9717 */
9718#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9719 do \
9720 { \
9721 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9722 { /* likely */ } \
9723 else \
9724 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9725 } while (0)
9726#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9727 do \
9728 { \
9729 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9730 { /* likely */ } \
9731 else \
9732 { \
9733 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9734 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9735 } \
9736 } while (0)
9737#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9738 do \
9739 { \
9740 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9741 { /* likely */ } \
9742 else \
9743 { \
9744 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9745 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9746 } \
9747 } while (0)
9748/**
9749 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9750 * are present.
9751 */
9752#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9753 do \
9754 { \
9755 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9756 { /* likely */ } \
9757 else \
9758 return IEMOP_RAISE_INVALID_OPCODE(); \
9759 } while (0)
9760
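/*
 * Illustrative sketch (under #if 0, not part of the build): the usual ordering
 * of the decode helpers in a ModR/M opcode function - fetch the ModR/M byte,
 * compute the effective address for memory forms, and only reject stray lock
 * prefixes once decoding is complete.  The opcode function name is
 * hypothetical.
 */
#if 0
FNIEMOP_DEF(iemOp_hypothetical_Ev)
{
    IEMOP_MNEMONIC("hypothetical Ev");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register operand: everything is decoded, check prefixes now. */
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        /* ... register form microcode ... */
        return VINF_SUCCESS;
    }
    /* Memory operand: fetch the addressing bytes first (see the helper below),
       then do the prefix check once decoding is complete. */
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* ... load/store through GCPtrEffDst ... */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif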
9761
9762/**
9763 * Calculates the effective address of a ModR/M memory operand.
9764 *
9765 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9766 *
9767 * @return Strict VBox status code.
9768 * @param pIemCpu The IEM per CPU data.
9769 * @param bRm The ModRM byte.
9770 * @param cbImm The size of any immediate following the
9771 * effective address opcode bytes. Important for
9772 * RIP relative addressing.
9773 * @param pGCPtrEff Where to return the effective address.
9774 */
9775IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9776{
9777 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9778 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9779#define SET_SS_DEF() \
9780 do \
9781 { \
9782 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9783 pIemCpu->iEffSeg = X86_SREG_SS; \
9784 } while (0)
9785
9786 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9787 {
9788/** @todo Check the effective address size crap! */
9789 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9790 {
9791 uint16_t u16EffAddr;
9792
9793 /* Handle the disp16 form with no registers first. */
9794 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9795 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9796 else
9797 {
9798 /* Get the displacement. */
9799 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9800 {
9801 case 0: u16EffAddr = 0; break;
9802 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9803 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9804 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9805 }
9806
9807 /* Add the base and index registers to the disp. */
9808 switch (bRm & X86_MODRM_RM_MASK)
9809 {
9810 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9811 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9812 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9813 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9814 case 4: u16EffAddr += pCtx->si; break;
9815 case 5: u16EffAddr += pCtx->di; break;
9816 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9817 case 7: u16EffAddr += pCtx->bx; break;
9818 }
9819 }
9820
9821 *pGCPtrEff = u16EffAddr;
9822 }
9823 else
9824 {
9825 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9826 uint32_t u32EffAddr;
9827
9828 /* Handle the disp32 form with no registers first. */
9829 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9830 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9831 else
9832 {
9833 /* Get the register (or SIB) value. */
9834 switch ((bRm & X86_MODRM_RM_MASK))
9835 {
9836 case 0: u32EffAddr = pCtx->eax; break;
9837 case 1: u32EffAddr = pCtx->ecx; break;
9838 case 2: u32EffAddr = pCtx->edx; break;
9839 case 3: u32EffAddr = pCtx->ebx; break;
9840 case 4: /* SIB */
9841 {
9842 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9843
9844 /* Get the index and scale it. */
9845 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9846 {
9847 case 0: u32EffAddr = pCtx->eax; break;
9848 case 1: u32EffAddr = pCtx->ecx; break;
9849 case 2: u32EffAddr = pCtx->edx; break;
9850 case 3: u32EffAddr = pCtx->ebx; break;
9851 case 4: u32EffAddr = 0; /*none */ break;
9852 case 5: u32EffAddr = pCtx->ebp; break;
9853 case 6: u32EffAddr = pCtx->esi; break;
9854 case 7: u32EffAddr = pCtx->edi; break;
9855 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9856 }
9857 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9858
9859 /* add base */
9860 switch (bSib & X86_SIB_BASE_MASK)
9861 {
9862 case 0: u32EffAddr += pCtx->eax; break;
9863 case 1: u32EffAddr += pCtx->ecx; break;
9864 case 2: u32EffAddr += pCtx->edx; break;
9865 case 3: u32EffAddr += pCtx->ebx; break;
9866 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9867 case 5:
9868 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9869 {
9870 u32EffAddr += pCtx->ebp;
9871 SET_SS_DEF();
9872 }
9873 else
9874 {
9875 uint32_t u32Disp;
9876 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9877 u32EffAddr += u32Disp;
9878 }
9879 break;
9880 case 6: u32EffAddr += pCtx->esi; break;
9881 case 7: u32EffAddr += pCtx->edi; break;
9882 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9883 }
9884 break;
9885 }
9886 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9887 case 6: u32EffAddr = pCtx->esi; break;
9888 case 7: u32EffAddr = pCtx->edi; break;
9889 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9890 }
9891
9892 /* Get and add the displacement. */
9893 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9894 {
9895 case 0:
9896 break;
9897 case 1:
9898 {
9899 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9900 u32EffAddr += i8Disp;
9901 break;
9902 }
9903 case 2:
9904 {
9905 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9906 u32EffAddr += u32Disp;
9907 break;
9908 }
9909 default:
9910 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9911 }
9912
9913 }
9914 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9915 *pGCPtrEff = u32EffAddr;
9916 else
9917 {
9918 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9919 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9920 }
9921 }
9922 }
9923 else
9924 {
9925 uint64_t u64EffAddr;
9926
9927 /* Handle the rip+disp32 form with no registers first. */
9928 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9929 {
9930 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9931 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9932 }
9933 else
9934 {
9935 /* Get the register (or SIB) value. */
9936 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9937 {
9938 case 0: u64EffAddr = pCtx->rax; break;
9939 case 1: u64EffAddr = pCtx->rcx; break;
9940 case 2: u64EffAddr = pCtx->rdx; break;
9941 case 3: u64EffAddr = pCtx->rbx; break;
9942 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9943 case 6: u64EffAddr = pCtx->rsi; break;
9944 case 7: u64EffAddr = pCtx->rdi; break;
9945 case 8: u64EffAddr = pCtx->r8; break;
9946 case 9: u64EffAddr = pCtx->r9; break;
9947 case 10: u64EffAddr = pCtx->r10; break;
9948 case 11: u64EffAddr = pCtx->r11; break;
9949 case 13: u64EffAddr = pCtx->r13; break;
9950 case 14: u64EffAddr = pCtx->r14; break;
9951 case 15: u64EffAddr = pCtx->r15; break;
9952 /* SIB */
9953 case 4:
9954 case 12:
9955 {
9956 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9957
9958 /* Get the index and scale it. */
9959 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9960 {
9961 case 0: u64EffAddr = pCtx->rax; break;
9962 case 1: u64EffAddr = pCtx->rcx; break;
9963 case 2: u64EffAddr = pCtx->rdx; break;
9964 case 3: u64EffAddr = pCtx->rbx; break;
9965 case 4: u64EffAddr = 0; /*none */ break;
9966 case 5: u64EffAddr = pCtx->rbp; break;
9967 case 6: u64EffAddr = pCtx->rsi; break;
9968 case 7: u64EffAddr = pCtx->rdi; break;
9969 case 8: u64EffAddr = pCtx->r8; break;
9970 case 9: u64EffAddr = pCtx->r9; break;
9971 case 10: u64EffAddr = pCtx->r10; break;
9972 case 11: u64EffAddr = pCtx->r11; break;
9973 case 12: u64EffAddr = pCtx->r12; break;
9974 case 13: u64EffAddr = pCtx->r13; break;
9975 case 14: u64EffAddr = pCtx->r14; break;
9976 case 15: u64EffAddr = pCtx->r15; break;
9977 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9978 }
9979 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9980
9981 /* add base */
9982 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9983 {
9984 case 0: u64EffAddr += pCtx->rax; break;
9985 case 1: u64EffAddr += pCtx->rcx; break;
9986 case 2: u64EffAddr += pCtx->rdx; break;
9987 case 3: u64EffAddr += pCtx->rbx; break;
9988 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9989 case 6: u64EffAddr += pCtx->rsi; break;
9990 case 7: u64EffAddr += pCtx->rdi; break;
9991 case 8: u64EffAddr += pCtx->r8; break;
9992 case 9: u64EffAddr += pCtx->r9; break;
9993 case 10: u64EffAddr += pCtx->r10; break;
9994 case 11: u64EffAddr += pCtx->r11; break;
9995 case 12: u64EffAddr += pCtx->r12; break;
9996 case 14: u64EffAddr += pCtx->r14; break;
9997 case 15: u64EffAddr += pCtx->r15; break;
9998 /* complicated encodings */
9999 case 5:
10000 case 13:
10001 if ((bRm & X86_MODRM_MOD_MASK) != 0)
10002 {
10003 if (!pIemCpu->uRexB)
10004 {
10005 u64EffAddr += pCtx->rbp;
10006 SET_SS_DEF();
10007 }
10008 else
10009 u64EffAddr += pCtx->r13;
10010 }
10011 else
10012 {
10013 uint32_t u32Disp;
10014 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
10015 u64EffAddr += (int32_t)u32Disp;
10016 }
10017 break;
10018 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10019 }
10020 break;
10021 }
10022 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10023 }
10024
10025 /* Get and add the displacement. */
10026 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
10027 {
10028 case 0:
10029 break;
10030 case 1:
10031 {
10032 int8_t i8Disp;
10033 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
10034 u64EffAddr += i8Disp;
10035 break;
10036 }
10037 case 2:
10038 {
10039 uint32_t u32Disp;
10040 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
10041 u64EffAddr += (int32_t)u32Disp;
10042 break;
10043 }
10044 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
10045 }
10046
10047 }
10048
10049 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
10050 *pGCPtrEff = u64EffAddr;
10051 else
10052 {
10053 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
10054 *pGCPtrEff = u64EffAddr & UINT32_MAX;
10055 }
10056 }
10057
10058 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
10059 return VINF_SUCCESS;
10060}
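/*
 * Illustrative sketch (under #if 0, not part of the build): the 16-bit ModR/M
 * effective address rules handled by the first branch above, restated as a
 * standalone helper.  The register values are passed in explicitly and uDisp
 * is assumed to be the already fetched (and sign-extended) displacement; this
 * is not how IEM calls the code, just a compact restatement of the table.
 */
#if 0
static uint16_t iemExampleCalc16BitEffAddr(uint8_t bRm, uint16_t uDisp,
                                           uint16_t bx, uint16_t bp, uint16_t si, uint16_t di)
{
    /* mod=0, r/m=6 is the bare disp16 form with no base or index register. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
        return uDisp;

    /* mod=0 has no displacement, mod=1/2 add the fetched displacement. */
    uint16_t uEffAddr = ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) ? uDisp : 0;
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: return uEffAddr + bx + si;
        case 1: return uEffAddr + bx + di;
        case 2: return uEffAddr + bp + si;  /* SS is the default segment */
        case 3: return uEffAddr + bp + di;  /* SS is the default segment */
        case 4: return uEffAddr + si;
        case 5: return uEffAddr + di;
        case 6: return uEffAddr + bp;       /* SS is the default segment */
        default: return uEffAddr + bx;      /* r/m=7 */
    }
}
#endif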
10061
10062/** @} */
10063
10064
10065
10066/*
10067 * Include the instructions
10068 */
10069#include "IEMAllInstructions.cpp.h"
10070
10071
10072
10073
10074#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10075
10076/**
10077 * Sets up execution verification mode.
10078 */
10079IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
10080{
10081 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10082 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
10083
10084 /*
10085 * Always note down the address of the current instruction.
10086 */
10087 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
10088 pIemCpu->uOldRip = pOrgCtx->rip;
10089
10090 /*
10091 * Enable verification and/or logging.
10092 */
10093 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
10094 if ( fNewNoRem
10095 && ( 0
10096#if 0 /* auto enable on first paged protected mode interrupt */
10097 || ( pOrgCtx->eflags.Bits.u1IF
10098 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
10099 && TRPMHasTrap(pVCpu)
10100 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
10101#endif
10102#if 0
10103 || ( pOrgCtx->cs.Sel == 0x10
10104 && ( pOrgCtx->rip == 0x90119e3e
10105 || pOrgCtx->rip == 0x901d9810))
10106#endif
10107#if 0 /* Auto enable DSL - FPU stuff. */
10108 || ( pOrgCtx->cs.Sel == 0x10
10109 && (// pOrgCtx->rip == 0xc02ec07f
10110 //|| pOrgCtx->rip == 0xc02ec082
10111 //|| pOrgCtx->rip == 0xc02ec0c9
10112 0
10113 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
10114#endif
10115#if 0 /* Auto enable DSL - fstp st0 stuff. */
10116 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
10117#endif
10118#if 0
10119 || pOrgCtx->rip == 0x9022bb3a
10120#endif
10121#if 0
10122 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
10123#endif
10124#if 0
10125 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
10126 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
10127#endif
10128#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
10129 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
10130 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
10131 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
10132#endif
10133#if 0 /* NT4SP1 - xadd early boot. */
10134 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
10135#endif
10136#if 0 /* NT4SP1 - wrmsr (intel MSR). */
10137 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
10138#endif
10139#if 0 /* NT4SP1 - cmpxchg (AMD). */
10140 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
10141#endif
10142#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
10143 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
10144#endif
10145#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
10146 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
10147
10148#endif
10149#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
10150 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
10151
10152#endif
10153#if 0 /* NT4SP1 - frstor [ecx] */
10154 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
10155#endif
10156#if 0 /* xxxxxx - All long mode code. */
10157 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
10158#endif
10159#if 0 /* rep movsq linux 3.7 64-bit boot. */
10160 || (pOrgCtx->rip == 0x0000000000100241)
10161#endif
10162#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
10163 || (pOrgCtx->rip == 0x000000000215e240)
10164#endif
10165#if 0 /* DOS's size-overridden iret to v8086. */
10166 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
10167#endif
10168 )
10169 )
10170 {
10171 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
10172 RTLogFlags(NULL, "enabled");
10173 fNewNoRem = false;
10174 }
10175 if (fNewNoRem != pIemCpu->fNoRem)
10176 {
10177 pIemCpu->fNoRem = fNewNoRem;
10178 if (!fNewNoRem)
10179 {
10180 LogAlways(("Enabling verification mode!\n"));
10181 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
10182 }
10183 else
10184 LogAlways(("Disabling verification mode!\n"));
10185 }
10186
10187 /*
10188 * Switch state.
10189 */
10190 if (IEM_VERIFICATION_ENABLED(pIemCpu))
10191 {
10192 static CPUMCTX s_DebugCtx; /* Ugly! */
10193
10194 s_DebugCtx = *pOrgCtx;
10195 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
10196 }
10197
10198 /*
10199 * See if there is an interrupt pending in TRPM and inject it if we can.
10200 */
10201 pIemCpu->uInjectCpl = UINT8_MAX;
10202 if ( pOrgCtx->eflags.Bits.u1IF
10203 && TRPMHasTrap(pVCpu)
10204 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
10205 {
10206 uint8_t u8TrapNo;
10207 TRPMEVENT enmType;
10208 RTGCUINT uErrCode;
10209 RTGCPTR uCr2;
10210 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
10211 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10212 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10213 TRPMResetTrap(pVCpu);
10214 pIemCpu->uInjectCpl = pIemCpu->uCpl;
10215 }
10216
10217 /*
10218 * Reset the counters.
10219 */
10220 pIemCpu->cIOReads = 0;
10221 pIemCpu->cIOWrites = 0;
10222 pIemCpu->fIgnoreRaxRdx = false;
10223 pIemCpu->fOverlappingMovs = false;
10224 pIemCpu->fProblematicMemory = false;
10225 pIemCpu->fUndefinedEFlags = 0;
10226
10227 if (IEM_VERIFICATION_ENABLED(pIemCpu))
10228 {
10229 /*
10230 * Free all verification records.
10231 */
10232 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
10233 pIemCpu->pIemEvtRecHead = NULL;
10234 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
10235 do
10236 {
10237 while (pEvtRec)
10238 {
10239 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
10240 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
10241 pIemCpu->pFreeEvtRec = pEvtRec;
10242 pEvtRec = pNext;
10243 }
10244 pEvtRec = pIemCpu->pOtherEvtRecHead;
10245 pIemCpu->pOtherEvtRecHead = NULL;
10246 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
10247 } while (pEvtRec);
10248 }
10249}
10250
10251
10252/**
10253 * Allocate an event record.
10254 * @returns Pointer to a record.
10255 */
10256IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
10257{
10258 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10259 return NULL;
10260
10261 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
10262 if (pEvtRec)
10263 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
10264 else
10265 {
10266 if (!pIemCpu->ppIemEvtRecNext)
10267 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
10268
10269 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
10270 if (!pEvtRec)
10271 return NULL;
10272 }
10273 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
10274 pEvtRec->pNext = NULL;
10275 return pEvtRec;
10276}
10277
10278
10279/**
10280 * IOMMMIORead notification.
10281 */
10282VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
10283{
10284 PVMCPU pVCpu = VMMGetCpu(pVM);
10285 if (!pVCpu)
10286 return;
10287 PIEMCPU pIemCpu = &pVCpu->iem.s;
10288 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10289 if (!pEvtRec)
10290 return;
10291 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
10292 pEvtRec->u.RamRead.GCPhys = GCPhys;
10293 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
10294 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10295 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10296}
10297
10298
10299/**
10300 * IOMMMIOWrite notification.
10301 */
10302VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
10303{
10304 PVMCPU pVCpu = VMMGetCpu(pVM);
10305 if (!pVCpu)
10306 return;
10307 PIEMCPU pIemCpu = &pVCpu->iem.s;
10308 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10309 if (!pEvtRec)
10310 return;
10311 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
10312 pEvtRec->u.RamWrite.GCPhys = GCPhys;
10313 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
10314 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
10315 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
10316 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
10317 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
10318 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10319 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10320}
10321
10322
10323/**
10324 * IOMIOPortRead notification.
10325 */
10326VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
10327{
10328 PVMCPU pVCpu = VMMGetCpu(pVM);
10329 if (!pVCpu)
10330 return;
10331 PIEMCPU pIemCpu = &pVCpu->iem.s;
10332 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10333 if (!pEvtRec)
10334 return;
10335 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10336 pEvtRec->u.IOPortRead.Port = Port;
10337 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10338 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10339 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10340}
10341
10342/**
10343 * IOMIOPortWrite notification.
10344 */
10345VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10346{
10347 PVMCPU pVCpu = VMMGetCpu(pVM);
10348 if (!pVCpu)
10349 return;
10350 PIEMCPU pIemCpu = &pVCpu->iem.s;
10351 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10352 if (!pEvtRec)
10353 return;
10354 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10355 pEvtRec->u.IOPortWrite.Port = Port;
10356 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10357 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10358 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10359 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10360}
10361
10362
10363VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10364{
10365 PVMCPU pVCpu = VMMGetCpu(pVM);
10366 if (!pVCpu)
10367 return;
10368 PIEMCPU pIemCpu = &pVCpu->iem.s;
10369 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10370 if (!pEvtRec)
10371 return;
10372 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
10373 pEvtRec->u.IOPortStrRead.Port = Port;
10374 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
10375 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
10376 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10377 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10378}
10379
10380
10381VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10382{
10383 PVMCPU pVCpu = VMMGetCpu(pVM);
10384 if (!pVCpu)
10385 return;
10386 PIEMCPU pIemCpu = &pVCpu->iem.s;
10387 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10388 if (!pEvtRec)
10389 return;
10390 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
10391 pEvtRec->u.IOPortStrWrite.Port = Port;
10392 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
10393 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
10394 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10395 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10396}
10397
10398
10399/**
10400 * Fakes and records an I/O port read.
10401 *
10402 * @returns VINF_SUCCESS.
10403 * @param pIemCpu The IEM per CPU data.
10404 * @param Port The I/O port.
10405 * @param pu32Value Where to store the fake value.
10406 * @param cbValue The size of the access.
10407 */
10408IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10409{
10410 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10411 if (pEvtRec)
10412 {
10413 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10414 pEvtRec->u.IOPortRead.Port = Port;
10415 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10416 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10417 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10418 }
10419 pIemCpu->cIOReads++;
10420 *pu32Value = 0xcccccccc;
10421 return VINF_SUCCESS;
10422}
10423
10424
10425/**
10426 * Fakes and records an I/O port write.
10427 *
10428 * @returns VINF_SUCCESS.
10429 * @param pIemCpu The IEM per CPU data.
10430 * @param Port The I/O port.
10431 * @param u32Value The value being written.
10432 * @param cbValue The size of the access.
10433 */
10434IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10435{
10436 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10437 if (pEvtRec)
10438 {
10439 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10440 pEvtRec->u.IOPortWrite.Port = Port;
10441 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10442 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10443 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10444 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10445 }
10446 pIemCpu->cIOWrites++;
10447 return VINF_SUCCESS;
10448}
10449
10450
10451/**
10452 * Used to add extra details about a stub case.
10453 * @param pIemCpu The IEM per CPU state.
10454 */
10455IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10456{
10457 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10458 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10459 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10460 char szRegs[4096];
10461 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10462 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10463 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10464 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10465 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10466 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10467 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10468 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10469 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10470 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10471 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10472 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10473 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10474 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10475 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10476 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10477 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10478 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10479 " efer=%016VR{efer}\n"
10480 " pat=%016VR{pat}\n"
10481 " sf_mask=%016VR{sf_mask}\n"
10482 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10483 " lstar=%016VR{lstar}\n"
10484 " star=%016VR{star} cstar=%016VR{cstar}\n"
10485 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10486 );
10487
10488 char szInstr1[256];
10489 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10490 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10491 szInstr1, sizeof(szInstr1), NULL);
10492 char szInstr2[256];
10493 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10494 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10495 szInstr2, sizeof(szInstr2), NULL);
10496
10497 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10498}
10499
10500
10501/**
10502 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10503 * dump to the assertion info.
10504 *
10505 * @param pEvtRec The record to dump.
10506 */
10507IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10508{
10509 switch (pEvtRec->enmEvent)
10510 {
10511 case IEMVERIFYEVENT_IOPORT_READ:
10512 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10513 pEvtRec->u.IOPortWrite.Port,
10514 pEvtRec->u.IOPortWrite.cbValue);
10515 break;
10516 case IEMVERIFYEVENT_IOPORT_WRITE:
10517 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10518 pEvtRec->u.IOPortWrite.Port,
10519 pEvtRec->u.IOPortWrite.cbValue,
10520 pEvtRec->u.IOPortWrite.u32Value);
10521 break;
10522 case IEMVERIFYEVENT_IOPORT_STR_READ:
10523 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
10524 pEvtRec->u.IOPortStrWrite.Port,
10525 pEvtRec->u.IOPortStrWrite.cbValue,
10526 pEvtRec->u.IOPortStrWrite.cTransfers);
10527 break;
10528 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10529 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
10530 pEvtRec->u.IOPortStrWrite.Port,
10531 pEvtRec->u.IOPortStrWrite.cbValue,
10532 pEvtRec->u.IOPortStrWrite.cTransfers);
10533 break;
10534 case IEMVERIFYEVENT_RAM_READ:
10535 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10536 pEvtRec->u.RamRead.GCPhys,
10537 pEvtRec->u.RamRead.cb);
10538 break;
10539 case IEMVERIFYEVENT_RAM_WRITE:
10540 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10541 pEvtRec->u.RamWrite.GCPhys,
10542 pEvtRec->u.RamWrite.cb,
10543 (int)pEvtRec->u.RamWrite.cb,
10544 pEvtRec->u.RamWrite.ab);
10545 break;
10546 default:
10547 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10548 break;
10549 }
10550}
10551
10552
10553/**
10554 * Raises an assertion on the specified record, showing the given message with
10555 * a record dump attached.
10556 *
10557 * @param pIemCpu The IEM per CPU data.
10558 * @param pEvtRec1 The first record.
10559 * @param pEvtRec2 The second record.
10560 * @param pszMsg The message explaining why we're asserting.
10561 */
10562IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10563{
10564 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10565 iemVerifyAssertAddRecordDump(pEvtRec1);
10566 iemVerifyAssertAddRecordDump(pEvtRec2);
10567 iemVerifyAssertMsg2(pIemCpu);
10568 RTAssertPanic();
10569}
10570
10571
10572/**
10573 * Raises an assertion on the specified record, showing the given message with
10574 * a record dump attached.
10575 *
10576 * @param pIemCpu The IEM per CPU data.
10577 * @param pEvtRec The record to dump.
10578 * @param pszMsg The message explaining why we're asserting.
10579 */
10580IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10581{
10582 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10583 iemVerifyAssertAddRecordDump(pEvtRec);
10584 iemVerifyAssertMsg2(pIemCpu);
10585 RTAssertPanic();
10586}
10587
10588
10589/**
10590 * Verifies a write record.
10591 *
10592 * @param pIemCpu The IEM per CPU data.
10593 * @param pEvtRec The write record.
10594 * @param fRem Set if REM was doing the other execution. If clear,
10595 * it was HM.
10596 */
10597IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10598{
10599 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10600 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10601 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10602 if ( RT_FAILURE(rc)
10603 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10604 {
10605 /* fend off ins */
10606 if ( !pIemCpu->cIOReads
10607 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10608 || ( pEvtRec->u.RamWrite.cb != 1
10609 && pEvtRec->u.RamWrite.cb != 2
10610 && pEvtRec->u.RamWrite.cb != 4) )
10611 {
10612 /* fend off ROMs and MMIO */
10613 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10614 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10615 {
10616 /* fend off fxsave */
10617 if (pEvtRec->u.RamWrite.cb != 512)
10618 {
10619 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10620 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10621 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10622 RTAssertMsg2Add("%s: %.*Rhxs\n"
10623 "iem: %.*Rhxs\n",
10624 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10625 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10626 iemVerifyAssertAddRecordDump(pEvtRec);
10627 iemVerifyAssertMsg2(pIemCpu);
10628 RTAssertPanic();
10629 }
10630 }
10631 }
10632 }
10633
10634}
10635
10636/**
10637 * Performs the post-execution verification checks.
10638 */
10639IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrictIem)
10640{
10641 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10642 return rcStrictIem;
10643
10644 /*
10645 * Switch back the state.
10646 */
10647 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10648 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10649 Assert(pOrgCtx != pDebugCtx);
10650 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10651
10652 /*
10653 * Execute the instruction in REM.
10654 */
10655 bool fRem = false;
10656 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10657 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10658 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10659#ifdef IEM_VERIFICATION_MODE_FULL_HM
10660 if ( HMIsEnabled(pVM)
10661 && pIemCpu->cIOReads == 0
10662 && pIemCpu->cIOWrites == 0
10663 && !pIemCpu->fProblematicMemory)
10664 {
10665 uint64_t uStartRip = pOrgCtx->rip;
10666 unsigned iLoops = 0;
10667 do
10668 {
10669 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10670 iLoops++;
10671 } while ( rc == VINF_SUCCESS
10672 || ( rc == VINF_EM_DBG_STEPPED
10673 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10674 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10675 || ( pOrgCtx->rip != pDebugCtx->rip
10676 && pIemCpu->uInjectCpl != UINT8_MAX
10677 && iLoops < 8) );
10678 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10679 rc = VINF_SUCCESS;
10680 }
10681#endif
10682 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10683 || rc == VINF_IOM_R3_IOPORT_READ
10684 || rc == VINF_IOM_R3_IOPORT_WRITE
10685 || rc == VINF_IOM_R3_MMIO_READ
10686 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10687 || rc == VINF_IOM_R3_MMIO_WRITE
10688 || rc == VINF_CPUM_R3_MSR_READ
10689 || rc == VINF_CPUM_R3_MSR_WRITE
10690 || rc == VINF_EM_RESCHEDULE
10691 )
10692 {
10693 EMRemLock(pVM);
10694 rc = REMR3EmulateInstruction(pVM, pVCpu);
10695 AssertRC(rc);
10696 EMRemUnlock(pVM);
10697 fRem = true;
10698 }
10699
10700# if 1 /* Skip unimplemented instructions for now. */
10701 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10702 {
10703 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10704 if (rc == VINF_EM_DBG_STEPPED)
10705 return VINF_SUCCESS;
10706 return rc;
10707 }
10708# endif
10709
10710 /*
10711 * Compare the register states.
10712 */
10713 unsigned cDiffs = 0;
10714 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10715 {
10716 //Log(("REM and IEM ends up with different registers!\n"));
10717 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10718
10719# define CHECK_FIELD(a_Field) \
10720 do \
10721 { \
10722 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10723 { \
10724 switch (sizeof(pOrgCtx->a_Field)) \
10725 { \
10726 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10727 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10728 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10729 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10730 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10731 } \
10732 cDiffs++; \
10733 } \
10734 } while (0)
10735# define CHECK_XSTATE_FIELD(a_Field) \
10736 do \
10737 { \
10738 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10739 { \
10740 switch (sizeof(pOrgXState->a_Field)) \
10741 { \
10742 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10743 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10744 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10745 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10746 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10747 } \
10748 cDiffs++; \
10749 } \
10750 } while (0)
10751
10752# define CHECK_BIT_FIELD(a_Field) \
10753 do \
10754 { \
10755 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10756 { \
10757 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10758 cDiffs++; \
10759 } \
10760 } while (0)
10761
10762# define CHECK_SEL(a_Sel) \
10763 do \
10764 { \
10765 CHECK_FIELD(a_Sel.Sel); \
10766 CHECK_FIELD(a_Sel.Attr.u); \
10767 CHECK_FIELD(a_Sel.u64Base); \
10768 CHECK_FIELD(a_Sel.u32Limit); \
10769 CHECK_FIELD(a_Sel.fFlags); \
10770 } while (0)
10771
10772 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10773 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10774
10775#if 1 /* The recompiler doesn't update these the intel way. */
10776 if (fRem)
10777 {
10778 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10779 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10780 pOrgXState->x87.CS = pDebugXState->x87.CS;
10781 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10782 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10783 pOrgXState->x87.DS = pDebugXState->x87.DS;
10784 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10785 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10786 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10787 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10788 }
10789#endif
10790 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10791 {
10792 RTAssertMsg2Weak(" the FPU state differs\n");
10793 cDiffs++;
10794 CHECK_XSTATE_FIELD(x87.FCW);
10795 CHECK_XSTATE_FIELD(x87.FSW);
10796 CHECK_XSTATE_FIELD(x87.FTW);
10797 CHECK_XSTATE_FIELD(x87.FOP);
10798 CHECK_XSTATE_FIELD(x87.FPUIP);
10799 CHECK_XSTATE_FIELD(x87.CS);
10800 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10801 CHECK_XSTATE_FIELD(x87.FPUDP);
10802 CHECK_XSTATE_FIELD(x87.DS);
10803 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10804 CHECK_XSTATE_FIELD(x87.MXCSR);
10805 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10806 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10807 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10808 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10809 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10810 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10811 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10812 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10813 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10814 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10815 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10816 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10817 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10818 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10819 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10820 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10821 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10822 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10823 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10824 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10825 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10826 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10827 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10828 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10829 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10830 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10831 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10832 }
10833 CHECK_FIELD(rip);
10834 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10835 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10836 {
10837 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10838 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10839 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10840 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10841 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10842 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10843 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10844 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10845 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10846 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10847 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10848 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10849 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10850 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10851 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10852 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10853            if (0 && !fRem) /** @todo debug the occasional clearing of RF when running against VT-x. */
10854 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10855 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10856 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10857 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10858 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10859 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10860 }
10861
10862 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10863 CHECK_FIELD(rax);
10864 CHECK_FIELD(rcx);
10865 if (!pIemCpu->fIgnoreRaxRdx)
10866 CHECK_FIELD(rdx);
10867 CHECK_FIELD(rbx);
10868 CHECK_FIELD(rsp);
10869 CHECK_FIELD(rbp);
10870 CHECK_FIELD(rsi);
10871 CHECK_FIELD(rdi);
10872 CHECK_FIELD(r8);
10873 CHECK_FIELD(r9);
10874 CHECK_FIELD(r10);
10875 CHECK_FIELD(r11);
10876 CHECK_FIELD(r12);
10877 CHECK_FIELD(r13);
10878 CHECK_SEL(cs);
10879 CHECK_SEL(ss);
10880 CHECK_SEL(ds);
10881 CHECK_SEL(es);
10882 CHECK_SEL(fs);
10883 CHECK_SEL(gs);
10884 CHECK_FIELD(cr0);
10885
10886         /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10887            the faulting instruction first: 001b:77f61ff3 66 8b 42 02   mov ax, word [edx+002h] (NT4SP1) */
10888         /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
10889            while REM reports the address of the first byte on the page.  Pending investigation as to which is correct. */
10890 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10891 {
10892 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10893 { /* ignore */ }
10894 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10895 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10896 && fRem)
10897 { /* ignore */ }
10898 else
10899 CHECK_FIELD(cr2);
10900 }
10901 CHECK_FIELD(cr3);
10902 CHECK_FIELD(cr4);
10903 CHECK_FIELD(dr[0]);
10904 CHECK_FIELD(dr[1]);
10905 CHECK_FIELD(dr[2]);
10906 CHECK_FIELD(dr[3]);
10907 CHECK_FIELD(dr[6]);
10908 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10909 CHECK_FIELD(dr[7]);
10910 CHECK_FIELD(gdtr.cbGdt);
10911 CHECK_FIELD(gdtr.pGdt);
10912 CHECK_FIELD(idtr.cbIdt);
10913 CHECK_FIELD(idtr.pIdt);
10914 CHECK_SEL(ldtr);
10915 CHECK_SEL(tr);
10916 CHECK_FIELD(SysEnter.cs);
10917 CHECK_FIELD(SysEnter.eip);
10918 CHECK_FIELD(SysEnter.esp);
10919 CHECK_FIELD(msrEFER);
10920 CHECK_FIELD(msrSTAR);
10921 CHECK_FIELD(msrPAT);
10922 CHECK_FIELD(msrLSTAR);
10923 CHECK_FIELD(msrCSTAR);
10924 CHECK_FIELD(msrSFMASK);
10925 CHECK_FIELD(msrKERNELGSBASE);
10926
10927 if (cDiffs != 0)
10928 {
10929 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
10930 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10931 RTAssertPanic();
10932 static bool volatile s_fEnterDebugger = true;
10933 if (s_fEnterDebugger)
10934 DBGFSTOP(pVM);
10935
10936# if 1 /* Ignore unimplemented instructions for now. */
10937 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10938 rcStrictIem = VINF_SUCCESS;
10939# endif
10940 }
10941# undef CHECK_FIELD
10942# undef CHECK_BIT_FIELD
10943 }
10944
10945 /*
10946 * If the register state compared fine, check the verification event
10947 * records.
10948 */
10949 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10950 {
10951 /*
10952          * Compare verification event records.
10953 * - I/O port accesses should be a 1:1 match.
10954 */
10955 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10956 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10957 while (pIemRec && pOtherRec)
10958 {
10959            /* Since we might miss RAM writes and reads, ignore reads and verify
10960               any extra write records against the actual memory content.  */
10961 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10962 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10963 && pIemRec->pNext)
10964 {
10965 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10966 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10967 pIemRec = pIemRec->pNext;
10968 }
10969
10970 /* Do the compare. */
10971 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10972 {
10973 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10974 break;
10975 }
10976 bool fEquals;
10977 switch (pIemRec->enmEvent)
10978 {
10979 case IEMVERIFYEVENT_IOPORT_READ:
10980 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10981 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10982 break;
10983 case IEMVERIFYEVENT_IOPORT_WRITE:
10984 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10985 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10986 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10987 break;
10988 case IEMVERIFYEVENT_IOPORT_STR_READ:
10989 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
10990 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
10991 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
10992 break;
10993 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10994 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
10995 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
10996 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
10997 break;
10998 case IEMVERIFYEVENT_RAM_READ:
10999 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
11000 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
11001 break;
11002 case IEMVERIFYEVENT_RAM_WRITE:
11003 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
11004 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
11005 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
11006 break;
11007 default:
11008 fEquals = false;
11009 break;
11010 }
11011 if (!fEquals)
11012 {
11013 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
11014 break;
11015 }
11016
11017 /* advance */
11018 pIemRec = pIemRec->pNext;
11019 pOtherRec = pOtherRec->pNext;
11020 }
11021
11022 /* Ignore extra writes and reads. */
11023 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
11024 {
11025 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
11026 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
11027 pIemRec = pIemRec->pNext;
11028 }
11029 if (pIemRec != NULL)
11030 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
11031 else if (pOtherRec != NULL)
11032 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
11033 }
11034 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
11035
11036 return rcStrictIem;
11037}
11038
11039#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
11040
11041/* stubs */
11042IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
11043{
11044 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
11045 return VERR_INTERNAL_ERROR;
11046}
11047
11048IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
11049{
11050 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
11051 return VERR_INTERNAL_ERROR;
11052}
11053
11054#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
11055
11056
11057#ifdef LOG_ENABLED
11058/**
11059 * Logs the current instruction.
11060 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11061 * @param pCtx The current CPU context.
11062 * @param fSameCtx Set if we have the same context information as the VMM,
11063 * clear if we may have already executed an instruction in
11064 * our debug context. When clear, we assume IEMCPU holds
11065 * valid CPU mode info.
11066 */
11067IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
11068{
11069# ifdef IN_RING3
11070 if (LogIs2Enabled())
11071 {
11072 char szInstr[256];
11073 uint32_t cbInstr = 0;
11074 if (fSameCtx)
11075 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
11076 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
11077 szInstr, sizeof(szInstr), &cbInstr);
11078 else
11079 {
11080 uint32_t fFlags = 0;
11081 switch (pVCpu->iem.s.enmCpuMode)
11082 {
11083 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
11084 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
11085 case IEMMODE_16BIT:
11086 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
11087 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
11088 else
11089 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
11090 break;
11091 }
11092 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
11093 szInstr, sizeof(szInstr), &cbInstr);
11094 }
11095
11096 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
11097 Log2(("****\n"
11098 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
11099 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
11100 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
11101 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
11102 " %s\n"
11103 ,
11104 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
11105 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
11106 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
11107 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
11108 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
11109 szInstr));
11110
11111 if (LogIs3Enabled())
11112 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
11113 }
11114 else
11115# endif
11116 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
11117 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
11118}
11119#endif
11120
11121
11122/**
11123 * Makes status code adjustments (pass up from I/O and access handlers)
11124 * as well as maintaining statistics.
11125 *
11126 * @returns Strict VBox status code to pass up.
11127 * @param pIemCpu The IEM per CPU data.
11128 * @param rcStrict The status from executing an instruction.
11129 */
11130DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
11131{
11132 if (rcStrict != VINF_SUCCESS)
11133 {
11134 if (RT_SUCCESS(rcStrict))
11135 {
11136 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
11137 || rcStrict == VINF_IOM_R3_IOPORT_READ
11138 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
11139 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
11140 || rcStrict == VINF_IOM_R3_MMIO_READ
11141 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
11142 || rcStrict == VINF_IOM_R3_MMIO_WRITE
11143 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
11144 || rcStrict == VINF_CPUM_R3_MSR_READ
11145 || rcStrict == VINF_CPUM_R3_MSR_WRITE
11146 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
11147 || rcStrict == VINF_EM_RAW_TO_R3
11148 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
11149 /* raw-mode / virt handlers only: */
11150 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
11151 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
11152 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
11153 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
11154 || rcStrict == VINF_SELM_SYNC_GDT
11155 || rcStrict == VINF_CSAM_PENDING_ACTION
11156 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
11157 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11158/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
11159 int32_t const rcPassUp = pIemCpu->rcPassUp;
11160 if (rcPassUp == VINF_SUCCESS)
11161 pIemCpu->cRetInfStatuses++;
11162 else if ( rcPassUp < VINF_EM_FIRST
11163 || rcPassUp > VINF_EM_LAST
11164 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
11165 {
11166 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
11167 pIemCpu->cRetPassUpStatus++;
11168 rcStrict = rcPassUp;
11169 }
11170 else
11171 {
11172 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
11173 pIemCpu->cRetInfStatuses++;
11174 }
11175 }
11176 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
11177 pIemCpu->cRetAspectNotImplemented++;
11178 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
11179 pIemCpu->cRetInstrNotImplemented++;
11180#ifdef IEM_VERIFICATION_MODE_FULL
11181 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
11182 rcStrict = VINF_SUCCESS;
11183#endif
11184 else
11185 pIemCpu->cRetErrStatuses++;
11186 }
11187 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
11188 {
11189 pIemCpu->cRetPassUpStatus++;
11190 rcStrict = pIemCpu->rcPassUp;
11191 }
11192
11193 return rcStrict;
11194}
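
/* The pass-up rule above in short (a reading of the code, illustration only): a
   recorded rcPassUp replaces an informational rcStrict unless rcPassUp is an EM
   scheduling status (VINF_EM_FIRST..VINF_EM_LAST) that is numerically greater
   than or equal to rcStrict; a VINF_SUCCESS rcPassUp never overrides anything. */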
11195
11196
11197/**
11198 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
11199 * IEMExecOneWithPrefetchedByPC.
11200 *
11201 * @return Strict VBox status code.
11202 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11203 * @param pIemCpu The IEM per CPU data.
11204 * @param fExecuteInhibit If set, execute the instruction following CLI,
11205 * POP SS and MOV SS,GR.
11206 */
11207DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
11208{
11209 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
11210 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
11211 if (rcStrict == VINF_SUCCESS)
11212 pIemCpu->cInstructions++;
11213 if (pIemCpu->cActiveMappings > 0)
11214 iemMemRollback(pIemCpu);
11215//#ifdef DEBUG
11216// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
11217//#endif
11218
11219 /* Execute the next instruction as well if a cli, pop ss or
11220 mov ss, Gr has just completed successfully. */
11221 if ( fExecuteInhibit
11222 && rcStrict == VINF_SUCCESS
11223 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
11224 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
11225 {
11226 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
11227 if (rcStrict == VINF_SUCCESS)
11228 {
11229# ifdef LOG_ENABLED
11230 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
11231# endif
11232 IEM_OPCODE_GET_NEXT_U8(&b);
11233 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
11234 if (rcStrict == VINF_SUCCESS)
11235 pIemCpu->cInstructions++;
11236 if (pIemCpu->cActiveMappings > 0)
11237 iemMemRollback(pIemCpu);
11238 }
11239 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
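        /* Presumably the bogus PC above can never match the guest RIP again, so the
           pending interrupt inhibition is effectively neutralized (assumed intent of
           the magic value). */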
11240 }
11241
11242 /*
11243 * Return value fiddling, statistics and sanity assertions.
11244 */
11245 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11246
11247 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
11248 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
11249#if defined(IEM_VERIFICATION_MODE_FULL)
11250 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
11251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
11252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
11253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
11254#endif
11255 return rcStrict;
11256}
11257
11258
11259#ifdef IN_RC
11260/**
11261 * Re-enters raw-mode or ensures we return to ring-3.
11262 *
11263 * @returns rcStrict, maybe modified.
11264 * @param pIemCpu The IEM CPU structure.
11265 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11266 * @param pCtx The current CPU context.
11267 * @param rcStrict The status code returned by the interpreter.
11268 */
11269DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
11270{
11271 if ( !pIemCpu->fInPatchCode
11272 && ( rcStrict == VINF_SUCCESS
11273 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
11274 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
11275 {
11276 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
11277 CPUMRawEnter(pVCpu);
11278 else
11279 {
11280 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
11281 rcStrict = VINF_EM_RESCHEDULE;
11282 }
11283 }
11284 return rcStrict;
11285}
11286#endif
11287
11288
11289/**
11290 * Execute one instruction.
11291 *
11292 * @return Strict VBox status code.
11293 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11294 */
11295VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
11296{
11297 PIEMCPU pIemCpu = &pVCpu->iem.s;
11298
11299#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11300 if (++pIemCpu->cVerifyDepth == 1)
11301 iemExecVerificationModeSetup(pIemCpu);
11302#endif
11303#ifdef LOG_ENABLED
11304 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11305 iemLogCurInstr(pVCpu, pCtx, true);
11306#endif
11307
11308 /*
11309 * Do the decoding and emulation.
11310 */
11311 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11312 if (rcStrict == VINF_SUCCESS)
11313 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11314
11315#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11316 /*
11317 * Assert some sanity.
11318 */
11319 if (pIemCpu->cVerifyDepth == 1)
11320 rcStrict = iemExecVerificationModeCheck(pIemCpu, rcStrict);
11321 pIemCpu->cVerifyDepth--;
11322#endif
11323#ifdef IN_RC
11324 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11325#endif
11326 if (rcStrict != VINF_SUCCESS)
11327 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11328 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11329 return rcStrict;
11330}
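
/*
 * A minimal usage sketch (illustration only, excluded from the build): driving
 * IEMExecOne() from a hypothetical ring-3 caller until it returns something other
 * than VINF_SUCCESS.  The wrapper name and loop shape are invented; only the
 * IEMExecOne() call itself comes from the interface above.
 */
#if 0
static VBOXSTRICTRC iemUsageSketchExecSome(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxInstructions-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);       /* decode and execute exactly one instruction */
        if (rcStrict != VINF_SUCCESS)       /* informational statuses (I/O, MMIO, MSR, ...) go back to the caller */
            break;
    }
    return rcStrict;
}
#endif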
11331
11332
11333VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11334{
11335 PIEMCPU pIemCpu = &pVCpu->iem.s;
11336 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11337 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11338
11339 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11340 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11341 if (rcStrict == VINF_SUCCESS)
11342 {
11343 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11344 if (pcbWritten)
11345 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11346 }
11347
11348#ifdef IN_RC
11349 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11350#endif
11351 return rcStrict;
11352}
11353
11354
11355VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11356 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11357{
11358 PIEMCPU pIemCpu = &pVCpu->iem.s;
11359 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11360 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11361
11362 VBOXSTRICTRC rcStrict;
11363 if ( cbOpcodeBytes
11364 && pCtx->rip == OpcodeBytesPC)
11365 {
11366 iemInitDecoder(pIemCpu, false);
11367 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11368 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11369 rcStrict = VINF_SUCCESS;
11370 }
11371 else
11372 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11373 if (rcStrict == VINF_SUCCESS)
11374 {
11375 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11376 }
11377
11378#ifdef IN_RC
11379 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11380#endif
11381 return rcStrict;
11382}
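
/*
 * A minimal usage sketch (illustration only, excluded from the build): handing
 * already-fetched opcode bytes to IEMExecOneWithPrefetchedByPC().  The opcode
 * bytes and the helper name are invented; the parameters follow the function
 * above (uExitRip must equal the guest RIP for the prefetched bytes to be used).
 */
#if 0
static VBOXSTRICTRC iemUsageSketchPrefetched(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t uExitRip)
{
    /* Hypothetical bytes already fetched at uExitRip: 0f 20 c0 = mov eax, cr0. */
    static const uint8_t s_abOpcode[] = { 0x0f, 0x20, 0xc0 };
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, uExitRip, s_abOpcode, sizeof(s_abOpcode));
}
#endif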
11383
11384
11385VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11386{
11387 PIEMCPU pIemCpu = &pVCpu->iem.s;
11388 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11389 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11390
11391 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11392 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11393 if (rcStrict == VINF_SUCCESS)
11394 {
11395 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11396 if (pcbWritten)
11397 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11398 }
11399
11400#ifdef IN_RC
11401 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11402#endif
11403 return rcStrict;
11404}
11405
11406
11407VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11408 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11409{
11410 PIEMCPU pIemCpu = &pVCpu->iem.s;
11411 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11412 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11413
11414 VBOXSTRICTRC rcStrict;
11415 if ( cbOpcodeBytes
11416 && pCtx->rip == OpcodeBytesPC)
11417 {
11418 iemInitDecoder(pIemCpu, true);
11419 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11420 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11421 rcStrict = VINF_SUCCESS;
11422 }
11423 else
11424 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11425 if (rcStrict == VINF_SUCCESS)
11426 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11427
11428#ifdef IN_RC
11429 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11430#endif
11431 return rcStrict;
11432}
11433
11434
11435/**
11436 * For debugging DISGetParamSize, may come in handy.
11437 *
11438 * @returns Strict VBox status code.
11439 * @param pVCpu The cross context virtual CPU structure of the
11440 * calling EMT.
11441 * @param pCtxCore The context core structure.
11442 * @param OpcodeBytesPC The PC of the opcode bytes.
11443 * @param pvOpcodeBytes Prefetched opcode bytes.
11444 * @param cbOpcodeBytes Number of prefetched bytes.
11445 * @param pcbWritten Where to return the number of bytes written.
11446 * Optional.
11447 */
11448VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11449 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
11450 uint32_t *pcbWritten)
11451{
11452 PIEMCPU pIemCpu = &pVCpu->iem.s;
11453 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11454 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11455
11456 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11457 VBOXSTRICTRC rcStrict;
11458 if ( cbOpcodeBytes
11459 && pCtx->rip == OpcodeBytesPC)
11460 {
11461 iemInitDecoder(pIemCpu, true);
11462 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11463 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11464 rcStrict = VINF_SUCCESS;
11465 }
11466 else
11467 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11468 if (rcStrict == VINF_SUCCESS)
11469 {
11470 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11471 if (pcbWritten)
11472 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11473 }
11474
11475#ifdef IN_RC
11476 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11477#endif
11478 return rcStrict;
11479}
11480
11481
11482VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11483{
11484 PIEMCPU pIemCpu = &pVCpu->iem.s;
11485
11486 /*
11487 * See if there is an interrupt pending in TRPM and inject it if we can.
11488 */
11489#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11490 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11491# ifdef IEM_VERIFICATION_MODE_FULL
11492 pIemCpu->uInjectCpl = UINT8_MAX;
11493# endif
11494 if ( pCtx->eflags.Bits.u1IF
11495 && TRPMHasTrap(pVCpu)
11496 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11497 {
11498 uint8_t u8TrapNo;
11499 TRPMEVENT enmType;
11500 RTGCUINT uErrCode;
11501 RTGCPTR uCr2;
11502 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11503 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11504 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11505 TRPMResetTrap(pVCpu);
11506 }
11507#else
11508 iemExecVerificationModeSetup(pIemCpu);
11509 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11510#endif
11511
11512 /*
11513 * Log the state.
11514 */
11515#ifdef LOG_ENABLED
11516 iemLogCurInstr(pVCpu, pCtx, true);
11517#endif
11518
11519 /*
11520 * Do the decoding and emulation.
11521 */
11522 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11523 if (rcStrict == VINF_SUCCESS)
11524 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11525
11526#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11527 /*
11528 * Assert some sanity.
11529 */
11530 rcStrict = iemExecVerificationModeCheck(pIemCpu, rcStrict);
11531#endif
11532
11533 /*
11534 * Maybe re-enter raw-mode and log.
11535 */
11536#ifdef IN_RC
11537 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11538#endif
11539 if (rcStrict != VINF_SUCCESS)
11540 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11541 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11542 return rcStrict;
11543}
11544
11545
11546
11547/**
11548 * Injects a trap, fault, abort, software interrupt or external interrupt.
11549 *
11550 * The parameter list matches TRPMQueryTrapAll pretty closely.
11551 *
11552 * @returns Strict VBox status code.
11553 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11554 * @param u8TrapNo The trap number.
11555 * @param enmType What type is it (trap/fault/abort), software
11556 * interrupt or hardware interrupt.
11557 * @param uErrCode The error code if applicable.
11558 * @param uCr2 The CR2 value if applicable.
11559 * @param cbInstr The instruction length (only relevant for
11560 * software interrupts).
11561 */
11562VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11563 uint8_t cbInstr)
11564{
11565 iemInitDecoder(&pVCpu->iem.s, false);
11566#ifdef DBGFTRACE_ENABLED
11567 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11568 u8TrapNo, enmType, uErrCode, uCr2);
11569#endif
11570
11571 uint32_t fFlags;
11572 switch (enmType)
11573 {
11574 case TRPM_HARDWARE_INT:
11575 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11576 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11577 uErrCode = uCr2 = 0;
11578 break;
11579
11580 case TRPM_SOFTWARE_INT:
11581 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11582 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11583 uErrCode = uCr2 = 0;
11584 break;
11585
11586 case TRPM_TRAP:
11587 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11588 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11589 if (u8TrapNo == X86_XCPT_PF)
11590 fFlags |= IEM_XCPT_FLAGS_CR2;
11591 switch (u8TrapNo)
11592 {
11593 case X86_XCPT_DF:
11594 case X86_XCPT_TS:
11595 case X86_XCPT_NP:
11596 case X86_XCPT_SS:
11597 case X86_XCPT_PF:
11598 case X86_XCPT_AC:
11599 fFlags |= IEM_XCPT_FLAGS_ERR;
11600 break;
11601
11602 case X86_XCPT_NMI:
11603 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11604 break;
11605 }
11606 break;
11607
11608 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11609 }
11610
11611 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11612}
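
/*
 * A minimal usage sketch (illustration only, excluded from the build): injecting a
 * page fault through IEMInjectTrap().  The error code and fault address are invented
 * example values; the parameter order matches the function above.
 */
#if 0
static VBOXSTRICTRC iemUsageSketchInjectPf(PVMCPU pVCpu)
{
    uint16_t const uErrCode = 0x02;         /* invented: write access to a not-present page */
    RTGCPTR  const uCr2     = 0x00001000;   /* invented fault address */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, uCr2, 0 /*cbInstr*/);
}
#endif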
11613
11614
11615/**
11616 * Injects the active TRPM event.
11617 *
11618 * @returns Strict VBox status code.
11619 * @param pVCpu The cross context virtual CPU structure.
11620 */
11621VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11622{
11623#ifndef IEM_IMPLEMENTS_TASKSWITCH
11624 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11625#else
11626 uint8_t u8TrapNo;
11627 TRPMEVENT enmType;
11628 RTGCUINT uErrCode;
11629 RTGCUINTPTR uCr2;
11630 uint8_t cbInstr;
11631 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11632 if (RT_FAILURE(rc))
11633 return rc;
11634
11635 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11636
11637 /** @todo Are there any other codes that imply the event was successfully
11638 * delivered to the guest? See @bugref{6607}. */
11639 if ( rcStrict == VINF_SUCCESS
11640 || rcStrict == VINF_IEM_RAISED_XCPT)
11641 {
11642 TRPMResetTrap(pVCpu);
11643 }
11644 return rcStrict;
11645#endif
11646}
11647
11648
11649VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11650{
11651 return VERR_NOT_IMPLEMENTED;
11652}
11653
11654
11655VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11656{
11657 return VERR_NOT_IMPLEMENTED;
11658}
11659
11660
11661#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11662/**
11663 * Executes an IRET instruction with default operand size.
11664 *
11665 * This is for PATM.
11666 *
11667 * @returns VBox status code.
11668 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11669 * @param pCtxCore The register frame.
11670 */
11671VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11672{
11673 PIEMCPU pIemCpu = &pVCpu->iem.s;
11674 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11675
11676 iemCtxCoreToCtx(pCtx, pCtxCore);
11677 iemInitDecoder(pIemCpu);
11678 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11679 if (rcStrict == VINF_SUCCESS)
11680 iemCtxToCtxCore(pCtxCore, pCtx);
11681 else
11682 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11683 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11684 return rcStrict;
11685}
11686#endif
11687
11688
11689/**
11690 * Macro used by the IEMExec* method to check the given instruction length.
11691 * Macro used by the IEMExec* methods to check the given instruction length.
11692 * Will return on failure!
11693 *
11694 * @param a_cbInstr The given instruction length.
11695 * @param a_cbMin The minimum length.
11696 */
11697#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11698 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11699 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
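/* The single unsigned compare above is a range check: subtracting a_cbMin makes any
   a_cbInstr below the minimum wrap around to a huge value, so the assertion holds
   exactly when a_cbMin <= a_cbInstr <= 15 (the architectural maximum instruction
   length).  A worked instance for a_cbMin=1 (illustration only):
        cbInstr=0  ->  0-1 = 0xffffffff  > 14  -> assert
        cbInstr=1  ->  1-1 = 0          <= 14  -> ok
        cbInstr=15 -> 15-1 = 14         <= 14  -> ok
        cbInstr=16 -> 16-1 = 15          > 14  -> assert */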
11700
11701
11702/**
11703 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
11704 *
11705 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
11706 *
11707 * @returns Fiddled strict VBox status code, ready to return to the non-IEM caller.
11708 * @param pIemCpu The IEM per-CPU structure.
11709 * @param rcStrict The status code to fiddle.
11710 */
11711DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
11712{
11713 iemUninitExec(pIemCpu);
11714#ifdef IN_RC
11715 return iemRCRawMaybeReenter(pIemCpu, IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx),
11716 iemExecStatusCodeFiddling(pIemCpu, rcStrict));
11717#else
11718 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11719#endif
11720}
11721
11722
11723/**
11724 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11725 *
11726 * This API ASSUMES that the caller has already verified that the guest code is
11727 * allowed to access the I/O port. (The I/O port is in the DX register in the
11728 * guest state.)
11729 *
11730 * @returns Strict VBox status code.
11731 * @param pVCpu The cross context virtual CPU structure.
11732 * @param cbValue The size of the I/O port access (1, 2, or 4).
11733 * @param enmAddrMode The addressing mode.
11734 * @param fRepPrefix Indicates whether a repeat prefix is used
11735 * (doesn't matter which for this instruction).
11736 * @param cbInstr The instruction length in bytes.
11737 * @param iEffSeg The effective segment register number.
11738 * @param fIoChecked Whether the access to the I/O port has been
11739 * checked or not. It's typically checked in the
11740 * HM scenario.
11741 */
11742VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11743 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
11744{
11745 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11746 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11747
11748 /*
11749 * State init.
11750 */
11751 PIEMCPU pIemCpu = &pVCpu->iem.s;
11752 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11753
11754 /*
11755 * Switch orgy for getting to the right handler.
11756 */
11757 VBOXSTRICTRC rcStrict;
11758 if (fRepPrefix)
11759 {
11760 switch (enmAddrMode)
11761 {
11762 case IEMMODE_16BIT:
11763 switch (cbValue)
11764 {
11765 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11766 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11767 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11768 default:
11769 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11770 }
11771 break;
11772
11773 case IEMMODE_32BIT:
11774 switch (cbValue)
11775 {
11776 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11777 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11778 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11779 default:
11780 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11781 }
11782 break;
11783
11784 case IEMMODE_64BIT:
11785 switch (cbValue)
11786 {
11787 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11788 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11789 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11790 default:
11791 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11792 }
11793 break;
11794
11795 default:
11796 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11797 }
11798 }
11799 else
11800 {
11801 switch (enmAddrMode)
11802 {
11803 case IEMMODE_16BIT:
11804 switch (cbValue)
11805 {
11806 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11807 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11808 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11809 default:
11810 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11811 }
11812 break;
11813
11814 case IEMMODE_32BIT:
11815 switch (cbValue)
11816 {
11817 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11818 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11819 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11820 default:
11821 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11822 }
11823 break;
11824
11825 case IEMMODE_64BIT:
11826 switch (cbValue)
11827 {
11828 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11829 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11830 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11831 default:
11832 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11833 }
11834 break;
11835
11836 default:
11837 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11838 }
11839 }
11840
11841 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11842}
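
/*
 * A minimal usage sketch (illustration only, excluded from the build): an HM exit
 * handler handing a "rep outsb" over to IEMExecStringIoWrite().  The wrapper name is
 * invented and X86_SREG_DS is assumed to be the default-segment index; the other
 * parameters follow the interface above.
 */
#if 0
static VBOXSTRICTRC iemUsageSketchRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu,
                                1 /*cbValue: byte-sized OUTSB*/,
                                IEMMODE_32BIT /*enmAddrMode*/,
                                true /*fRepPrefix*/,
                                cbInstr,
                                X86_SREG_DS /*iEffSeg: no segment override*/,
                                false /*fIoChecked: let IEM do the I/O permission check*/);
}
#endif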
11843
11844
11845/**
11846 * Interface for HM and EM for executing string I/O IN (read) instructions.
11847 *
11848 * This API ASSUMES that the caller has already verified that the guest code is
11849 * allowed to access the I/O port. (The I/O port is in the DX register in the
11850 * guest state.)
11851 *
11852 * @returns Strict VBox status code.
11853 * @param pVCpu The cross context virtual CPU structure.
11854 * @param cbValue The size of the I/O port access (1, 2, or 4).
11855 * @param enmAddrMode The addressing mode.
11856 * @param fRepPrefix Indicates whether a repeat prefix is used
11857 * (doesn't matter which for this instruction).
11858 * @param cbInstr The instruction length in bytes.
11859 * @param fIoChecked Whether the access to the I/O port has been
11860 * checked or not. It's typically checked in the
11861 * HM scenario.
11862 */
11863VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11864 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
11865{
11866 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11867
11868 /*
11869 * State init.
11870 */
11871 PIEMCPU pIemCpu = &pVCpu->iem.s;
11872 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11873
11874 /*
11875 * Switch orgy for getting to the right handler.
11876 */
11877 VBOXSTRICTRC rcStrict;
11878 if (fRepPrefix)
11879 {
11880 switch (enmAddrMode)
11881 {
11882 case IEMMODE_16BIT:
11883 switch (cbValue)
11884 {
11885 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, fIoChecked); break;
11886 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, fIoChecked); break;
11887 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, fIoChecked); break;
11888 default:
11889 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11890 }
11891 break;
11892
11893 case IEMMODE_32BIT:
11894 switch (cbValue)
11895 {
11896 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, fIoChecked); break;
11897 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, fIoChecked); break;
11898 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, fIoChecked); break;
11899 default:
11900 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11901 }
11902 break;
11903
11904 case IEMMODE_64BIT:
11905 switch (cbValue)
11906 {
11907 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, fIoChecked); break;
11908 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, fIoChecked); break;
11909 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, fIoChecked); break;
11910 default:
11911 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11912 }
11913 break;
11914
11915 default:
11916 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11917 }
11918 }
11919 else
11920 {
11921 switch (enmAddrMode)
11922 {
11923 case IEMMODE_16BIT:
11924 switch (cbValue)
11925 {
11926 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, fIoChecked); break;
11927 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, fIoChecked); break;
11928 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, fIoChecked); break;
11929 default:
11930 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11931 }
11932 break;
11933
11934 case IEMMODE_32BIT:
11935 switch (cbValue)
11936 {
11937 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, fIoChecked); break;
11938 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, fIoChecked); break;
11939 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, fIoChecked); break;
11940 default:
11941 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11942 }
11943 break;
11944
11945 case IEMMODE_64BIT:
11946 switch (cbValue)
11947 {
11948 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, fIoChecked); break;
11949 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, fIoChecked); break;
11950 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, fIoChecked); break;
11951 default:
11952 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11953 }
11954 break;
11955
11956 default:
11957 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11958 }
11959 }
11960
11961 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11962}
11963
11964
11965/**
11966 * Interface for raw-mode to execute an OUT (write) instruction.
11967 *
11968 * @returns Strict VBox status code.
11969 * @param pVCpu The cross context virtual CPU structure.
11970 * @param cbInstr The instruction length in bytes.
11971 * @param u16Port The port to write to.
11972 * @param cbReg The register size.
11973 *
11974 * @remarks In ring-0 not all of the state needs to be synced in.
11975 */
11976VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
11977{
11978 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11979 Assert(cbReg <= 4 && cbReg != 3);
11980
11981 PIEMCPU pIemCpu = &pVCpu->iem.s;
11982 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11983 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
11984 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11985}
11986
11987
11988/**
11989 * Interface for raw-mode to execute an IN (read) instruction.
11990 *
11991 * @returns Strict VBox status code.
11992 * @param pVCpu The cross context virtual CPU structure.
11993 * @param cbInstr The instruction length in bytes.
11994 * @param u16Port The port to read.
11995 * @param cbReg The register size.
11996 */
11997VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
11998{
11999 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
12000 Assert(cbReg <= 4 && cbReg != 3);
12001
12002 PIEMCPU pIemCpu = &pVCpu->iem.s;
12003 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
12004 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
12005 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
12006}
12007
12008
12009/**
12010 * Interface for HM and EM to write to a CRx register.
12011 *
12012 * @returns Strict VBox status code.
12013 * @param pVCpu The cross context virtual CPU structure.
12014 * @param cbInstr The instruction length in bytes.
12015 * @param iCrReg The control register number (destination).
12016 * @param iGReg The general purpose register number (source).
12017 *
12018 * @remarks In ring-0 not all of the state needs to be synced in.
12019 */
12020VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
12021{
12022 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
12023 Assert(iCrReg < 16);
12024 Assert(iGReg < 16);
12025
12026 PIEMCPU pIemCpu = &pVCpu->iem.s;
12027 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
12028 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
12029 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
12030}
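
/*
 * A minimal usage sketch (illustration only, excluded from the build): a decoded
 * "mov cr4, rax" (0f 22 e0, 3 bytes, no prefixes assumed) handed to
 * IEMExecDecodedMovCRxWrite().  The register indices are example values.
 */
#if 0
static VBOXSTRICTRC iemUsageSketchMovCr4Rax(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 4 /*iCrReg: CR4*/, 0 /*iGReg: RAX*/);
}
#endif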
12031
12032
12033/**
12034 * Interface for HM and EM to read from a CRx register.
12035 *
12036 * @returns Strict VBox status code.
12037 * @param pVCpu The cross context virtual CPU structure.
12038 * @param cbInstr The instruction length in bytes.
12039 * @param iGReg The general purpose register number (destination).
12040 * @param iCrReg The control register number (source).
12041 *
12042 * @remarks In ring-0 not all of the state needs to be synced in.
12043 */
12044VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
12045{
12046 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
12047 Assert(iCrReg < 16);
12048 Assert(iGReg < 16);
12049
12050 PIEMCPU pIemCpu = &pVCpu->iem.s;
12051 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
12052 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
12053 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
12054}
12055
12056
12057/**
12058 * Interface for HM and EM to clear the CR0[TS] bit.
12059 *
12060 * @returns Strict VBox status code.
12061 * @param pVCpu The cross context virtual CPU structure.
12062 * @param cbInstr The instruction length in bytes.
12063 *
12064 * @remarks In ring-0 not all of the state needs to be synced in.
12065 */
12066VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
12067{
12068 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
12069
12070 PIEMCPU pIemCpu = &pVCpu->iem.s;
12071 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
12072 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
12073 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
12074}
12075
12076
12077/**
12078 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
12079 *
12080 * @returns Strict VBox status code.
12081 * @param pVCpu The cross context virtual CPU structure.
12082 * @param cbInstr The instruction length in bytes.
12083 * @param uValue The value to load into CR0.
12084 *
12085 * @remarks In ring-0 not all of the state needs to be synced in.
12086 */
12087VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
12088{
12089 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
12090
12091 PIEMCPU pIemCpu = &pVCpu->iem.s;
12092 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
12093 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
12094 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
12095}
12096
12097
12098/**
12099 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
12100 *
12101 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
12102 *
12103 * @returns Strict VBox status code.
12104 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
12105 * @param cbInstr The instruction length in bytes.
12106 * @remarks In ring-0 not all of the state needs to be synced in.
12107 * @thread EMT(pVCpu)
12108 */
12109VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
12110{
12111 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
12112
12113 PIEMCPU pIemCpu = &pVCpu->iem.s;
12114 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
12115 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
12116 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
12117}
12118
12119#ifdef IN_RING3
12120
12121/**
12122 * Handles the unlikely and probably fatal merge cases.
12123 *
12124 * @returns Merged status code.
12125 * @param rcStrict Current EM status code.
12126 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
12127 * with @a rcStrict.
12128 * @param iMemMap The memory mapping index. For error reporting only.
12129 * @param pIemCpu The IEMCPU structure of the calling EMT, for error
12130 * reporting only.
12131 */
12132DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
12133 unsigned iMemMap, PIEMCPU pIemCpu)
12134{
12135 if (RT_FAILURE_NP(rcStrict))
12136 return rcStrict;
12137
12138 if (RT_FAILURE_NP(rcStrictCommit))
12139 return rcStrictCommit;
12140
12141 if (rcStrict == rcStrictCommit)
12142 return rcStrictCommit;
12143
12144 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
12145 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
12146 pIemCpu->aMemMappings[iMemMap].fAccess,
12147 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pIemCpu->aMemBbMappings[iMemMap].cbFirst,
12148 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pIemCpu->aMemBbMappings[iMemMap].cbSecond));
12149 return VERR_IOM_FF_STATUS_IPE;
12150}
12151
12152
12153/**
12154 * Helper for IOMR3ProcessForceFlag.
12155 *
12156 * @returns Merged status code.
12157 * @param rcStrict Current EM status code.
12158 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
12159 * with @a rcStrict.
12160 * @param iMemMap The memory mapping index. For error reporting only.
12161 * @param pIemCpu The IEMCPU structure of the calling EMT, for error
12162 * reporting only.
12163 */
12164DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PIEMCPU pIemCpu)
12165{
12166 /* Simple. */
12167 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
12168 return rcStrictCommit;
12169
12170 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
12171 return rcStrict;
12172
12173 /* EM scheduling status codes. */
12174 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
12175 && rcStrict <= VINF_EM_LAST))
12176 {
12177 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
12178 && rcStrictCommit <= VINF_EM_LAST))
12179 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
12180 }
12181
12182 /* Unlikely */
12183 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pIemCpu);
12184}
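
/* A few worked examples of the merge rules above (illustration only; the exact
   numeric ordering inside the VINF_EM_* range is an assumption):
      rcStrict=VINF_SUCCESS,      rcStrictCommit=VINF_IOM_R3_MMIO_WRITE -> VINF_IOM_R3_MMIO_WRITE
      rcStrict=VINF_EM_RAW_TO_R3, rcStrictCommit=VINF_SUCCESS           -> VINF_SUCCESS
      rcStrict=VINF_EM_HALT,      rcStrictCommit=VINF_SUCCESS           -> VINF_EM_HALT
      both in the VINF_EM_FIRST..VINF_EM_LAST range                     -> the numerically smaller one
      anything else                                                     -> iemR3MergeStatusSlow() */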
12185
12186
12187/**
12188 * Called by force-flag handling code when VMCPU_FF_IEM is set.
12189 *
12190 * @returns Merge between @a rcStrict and what the commit operation returned.
12191 * @param pVM The cross context VM structure.
12192 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
12193 * @param rcStrict The status code returned by ring-0 or raw-mode.
12194 */
12195VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
12196{
12197 PIEMCPU pIemCpu = &pVCpu->iem.s;
12198
12199 /*
12200 * Reset the pending commit.
12201 */
12202 AssertMsg( (pIemCpu->aMemMappings[0].fAccess | pIemCpu->aMemMappings[1].fAccess | pIemCpu->aMemMappings[2].fAccess)
12203 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
12204 ("%#x %#x %#x\n",
12205 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess, pIemCpu->aMemMappings[2].fAccess));
12206 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
12207
12208 /*
12209 * Commit the pending bounce buffers (usually just one).
12210 */
12211 unsigned cBufs = 0;
12212 unsigned iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
12213 while (iMemMap-- > 0)
12214 if (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
12215 {
12216 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
12217 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
12218 Assert(!pIemCpu->aMemBbMappings[iMemMap].fUnassigned);
12219
12220 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
12221 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
12222 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
12223
12224 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
12225 {
12226 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
12227 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
12228 pbBuf,
12229 cbFirst,
12230 PGMACCESSORIGIN_IEM);
12231 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pIemCpu);
12232 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
12233 iMemMap, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
12234 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
12235 }
12236
12237 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
12238 {
12239 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
12240 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
12241 pbBuf + cbFirst,
12242 cbSecond,
12243 PGMACCESSORIGIN_IEM);
12244 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pIemCpu);
12245 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
12246 iMemMap, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
12247 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
12248 }
12249 cBufs++;
12250 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
12251 }
12252
12253 AssertMsg(cBufs > 0 && cBufs == pIemCpu->cActiveMappings,
12254 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pIemCpu->cActiveMappings,
12255 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess, pIemCpu->aMemMappings[2].fAccess));
12256 pIemCpu->cActiveMappings = 0;
12257 return rcStrict;
12258}
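
/*
 * A minimal usage sketch (illustration only, excluded from the build): roughly how a
 * ring-3 force-flag handling loop would hand a pending VMCPU_FF_IEM over to
 * IEMR3ProcessForceFlag().  The surrounding loop and variables are assumptions.
 */
#if 0
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif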
12259
12260#endif /* IN_RING3 */
12261