VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 60854

Last change on this file since 60854 was 60847, checked in by vboxsync, 9 years ago

IOM: New way of defer RC+R0 I/O port writes, prepping for MMIO writes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 451.2 KB
1/* $Id: IEMAll.cpp 60847 2016-05-05 15:24:46Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85
86/*********************************************************************************************************************************
87* Header Files *
88*********************************************************************************************************************************/
89#define LOG_GROUP LOG_GROUP_IEM
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/pdm.h>
93#include <VBox/vmm/pgm.h>
94#include <internal/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/tm.h>
99#include <VBox/vmm/dbgf.h>
100#include <VBox/vmm/dbgftrace.h>
101#ifdef VBOX_WITH_RAW_MODE_NOT_R0
102# include <VBox/vmm/patm.h>
103# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
104# include <VBox/vmm/csam.h>
105# endif
106#endif
107#include "IEMInternal.h"
108#ifdef IEM_VERIFICATION_MODE_FULL
109# include <VBox/vmm/rem.h>
110# include <VBox/vmm/mm.h>
111#endif
112#include <VBox/vmm/vm.h>
113#include <VBox/log.h>
114#include <VBox/err.h>
115#include <VBox/param.h>
116#include <VBox/dis.h>
117#include <VBox/disopcode.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123
124/*********************************************************************************************************************************
125* Structures and Typedefs *
126*********************************************************************************************************************************/
127/** @typedef PFNIEMOP
128 * Pointer to an opcode decoder function.
129 */
130
131/** @def FNIEMOP_DEF
132 * Define an opcode decoder function.
133 *
134 * We're using macros for this so that adding and removing parameters as well as
135 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
136 *
137 * @param a_Name The function name.
138 */
139
140
141#if defined(__GNUC__) && defined(RT_ARCH_X86)
142typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
143# define FNIEMOP_DEF(a_Name) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
145# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
147# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
148 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
149
150#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
151typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
152# define FNIEMOP_DEF(a_Name) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
156# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
157 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
158
159#elif defined(__GNUC__)
160typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#else
169typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
170# define FNIEMOP_DEF(a_Name) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
174# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
175 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
176
177#endif
178
179
180/**
181 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
182 */
183typedef union IEMSELDESC
184{
185 /** The legacy view. */
186 X86DESC Legacy;
187 /** The long mode view. */
188 X86DESC64 Long;
189} IEMSELDESC;
190/** Pointer to a selector descriptor table entry. */
191typedef IEMSELDESC *PIEMSELDESC;
192
193
194/*********************************************************************************************************************************
195* Defined Constants And Macros *
196*********************************************************************************************************************************/
197/** Temporary hack to disable the double execution. Will be removed in favor
198 * of a dedicated execution mode in EM. */
199//#define IEM_VERIFICATION_MODE_NO_REM
200
201/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
202 * due to GCC lacking knowledge about the value range of a switch. */
203#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
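/* Illustrative sketch only (not part of the original file): the macro above is
 * meant as the default case of a switch that already covers every value of an
 * enum, e.g. (cbValue being a hypothetical local):
 *
 *      switch (pIemCpu->enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: cbValue = 2; break;
 *          case IEMMODE_32BIT: cbValue = 4; break;
 *          case IEMMODE_64BIT: cbValue = 8; break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 */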
204
205/**
206 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
207 * occasion.
208 */
209#ifdef LOG_ENABLED
210# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
211 do { \
212 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
213 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
214 } while (0)
215#else
216# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
217 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
218#endif
219
220/**
221 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
222 * occasion using the supplied logger statement.
223 *
224 * @param a_LoggerArgs What to log on failure.
225 */
226#ifdef LOG_ENABLED
227# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
228 do { \
229 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
230 /*LogFunc(a_LoggerArgs);*/ \
231 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
232 } while (0)
233#else
234# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
235 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
236#endif
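/* Illustrative sketch only (not part of the original file): a caller that hits
 * an unimplemented corner bails out with one of the two macros above, e.g.:
 *
 *      if (fSomeUnhandledCase)    // hypothetical condition
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("unhandled case %#x\n", fSomeUnhandledCase));
 */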
237
238/**
239 * Call an opcode decoder function.
240 *
241 * We're using macros for this so that adding and removing parameters can be
242 * done as we please. See FNIEMOP_DEF.
243 */
244#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
245
246/**
247 * Call a common opcode decoder function taking one extra argument.
248 *
249 * We're using macros for this so that adding and removing parameters can be
250 * done as we please. See FNIEMOP_DEF_1.
251 */
252#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
253
254/**
255 * Call a common opcode decoder function taking two extra arguments.
256 *
257 * We're using macros for this so that adding and removing parameters can be
258 * done as we please. See FNIEMOP_DEF_2.
259 */
260#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
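/* Illustrative sketch only (not part of the original file): how FNIEMOP_DEF and
 * FNIEMOP_CALL fit together.  The decoder name and body are hypothetical; the
 * real decoders live elsewhere in the IEM sources.
 *
 *      FNIEMOP_DEF(iemOp_ExampleNop)
 *      {
 *          return VINF_SUCCESS;    // a decoder returns a strict VBox status code
 *      }
 *
 *      // dispatch through the one-byte opcode map (b = opcode byte already fetched):
 *      return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */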
261
262/**
263 * Check if we're currently executing in real or virtual 8086 mode.
264 *
265 * @returns @c true if it is, @c false if not.
266 * @param a_pIemCpu The IEM state of the current CPU.
267 */
268#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
269
270/**
271 * Check if we're currently executing in virtual 8086 mode.
272 *
273 * @returns @c true if it is, @c false if not.
274 * @param a_pIemCpu The IEM state of the current CPU.
275 */
276#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
277
278/**
279 * Check if we're currently executing in long mode.
280 *
281 * @returns @c true if it is, @c false if not.
282 * @param a_pIemCpu The IEM state of the current CPU.
283 */
284#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
285
286/**
287 * Check if we're currently executing in real mode.
288 *
289 * @returns @c true if it is, @c false if not.
290 * @param a_pIemCpu The IEM state of the current CPU.
291 */
292#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
293
294/**
295 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
296 * @returns PCCPUMFEATURES
297 * @param a_pIemCpu The IEM state of the current CPU.
298 */
299#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
300
301/**
302 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
303 * @returns PCCPUMFEATURES
304 * @param a_pIemCpu The IEM state of the current CPU.
305 */
306#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
307
308/**
309 * Evaluates to true if we're presenting an Intel CPU to the guest.
310 */
311#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
312
313/**
314 * Evaluates to true if we're presenting an AMD CPU to the guest.
315 */
316#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
317
318/**
319 * Check if the address is canonical.
320 */
321#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
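/* Illustrative sketch only (not part of the original file): typical use of the
 * mode/feature helpers above when gating an instruction, e.g.:
 *
 *      if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)   // hypothetical feature check
 *          return iemRaiseUndefinedOpcode(pIemCpu);       // hypothetical raise helper
 */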
322
323
324/*********************************************************************************************************************************
325* Global Variables *
326*********************************************************************************************************************************/
327extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
328
329
330/** Function table for the ADD instruction. */
331IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
332{
333 iemAImpl_add_u8, iemAImpl_add_u8_locked,
334 iemAImpl_add_u16, iemAImpl_add_u16_locked,
335 iemAImpl_add_u32, iemAImpl_add_u32_locked,
336 iemAImpl_add_u64, iemAImpl_add_u64_locked
337};
338
339/** Function table for the ADC instruction. */
340IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
341{
342 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
343 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
344 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
345 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
346};
347
348/** Function table for the SUB instruction. */
349IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
350{
351 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
352 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
353 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
354 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
355};
356
357/** Function table for the SBB instruction. */
358IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
359{
360 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
361 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
362 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
363 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
364};
365
366/** Function table for the OR instruction. */
367IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
368{
369 iemAImpl_or_u8, iemAImpl_or_u8_locked,
370 iemAImpl_or_u16, iemAImpl_or_u16_locked,
371 iemAImpl_or_u32, iemAImpl_or_u32_locked,
372 iemAImpl_or_u64, iemAImpl_or_u64_locked
373};
374
375/** Function table for the XOR instruction. */
376IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
377{
378 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
379 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
380 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
381 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
382};
383
384/** Function table for the AND instruction. */
385IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
386{
387 iemAImpl_and_u8, iemAImpl_and_u8_locked,
388 iemAImpl_and_u16, iemAImpl_and_u16_locked,
389 iemAImpl_and_u32, iemAImpl_and_u32_locked,
390 iemAImpl_and_u64, iemAImpl_and_u64_locked
391};
392
393/** Function table for the CMP instruction.
394 * @remarks Making operand order ASSUMPTIONS.
395 */
396IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
397{
398 iemAImpl_cmp_u8, NULL,
399 iemAImpl_cmp_u16, NULL,
400 iemAImpl_cmp_u32, NULL,
401 iemAImpl_cmp_u64, NULL
402};
403
404/** Function table for the TEST instruction.
405 * @remarks Making operand order ASSUMPTIONS.
406 */
407IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
408{
409 iemAImpl_test_u8, NULL,
410 iemAImpl_test_u16, NULL,
411 iemAImpl_test_u32, NULL,
412 iemAImpl_test_u64, NULL
413};
414
415/** Function table for the BT instruction. */
416IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
417{
418 NULL, NULL,
419 iemAImpl_bt_u16, NULL,
420 iemAImpl_bt_u32, NULL,
421 iemAImpl_bt_u64, NULL
422};
423
424/** Function table for the BTC instruction. */
425IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
426{
427 NULL, NULL,
428 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
429 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
430 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
431};
432
433/** Function table for the BTR instruction. */
434IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
435{
436 NULL, NULL,
437 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
438 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
439 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
440};
441
442/** Function table for the BTS instruction. */
443IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
444{
445 NULL, NULL,
446 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
447 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
448 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
449};
450
451/** Function table for the BSF instruction. */
452IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
453{
454 NULL, NULL,
455 iemAImpl_bsf_u16, NULL,
456 iemAImpl_bsf_u32, NULL,
457 iemAImpl_bsf_u64, NULL
458};
459
460/** Function table for the BSR instruction. */
461IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
462{
463 NULL, NULL,
464 iemAImpl_bsr_u16, NULL,
465 iemAImpl_bsr_u32, NULL,
466 iemAImpl_bsr_u64, NULL
467};
468
469/** Function table for the IMUL instruction. */
470IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
471{
472 NULL, NULL,
473 iemAImpl_imul_two_u16, NULL,
474 iemAImpl_imul_two_u32, NULL,
475 iemAImpl_imul_two_u64, NULL
476};
477
478/** Group 1 /r lookup table. */
479IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
480{
481 &g_iemAImpl_add,
482 &g_iemAImpl_or,
483 &g_iemAImpl_adc,
484 &g_iemAImpl_sbb,
485 &g_iemAImpl_and,
486 &g_iemAImpl_sub,
487 &g_iemAImpl_xor,
488 &g_iemAImpl_cmp
489};
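/* Illustrative sketch only (not part of the original file): group 1 opcodes
 * (0x80..0x83) pick the implementation via the ModR/M reg field, roughly:
 *
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 *
 * (Assuming the usual iprt/x86.h ModR/M shift/mask constants.)
 */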
490
491/** Function table for the INC instruction. */
492IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
493{
494 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
495 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
496 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
497 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
498};
499
500/** Function table for the DEC instruction. */
501IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
502{
503 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
504 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
505 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
506 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
507};
508
509/** Function table for the NEG instruction. */
510IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
511{
512 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
513 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
514 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
515 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
516};
517
518/** Function table for the NOT instruction. */
519IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
520{
521 iemAImpl_not_u8, iemAImpl_not_u8_locked,
522 iemAImpl_not_u16, iemAImpl_not_u16_locked,
523 iemAImpl_not_u32, iemAImpl_not_u32_locked,
524 iemAImpl_not_u64, iemAImpl_not_u64_locked
525};
526
527
528/** Function table for the ROL instruction. */
529IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
530{
531 iemAImpl_rol_u8,
532 iemAImpl_rol_u16,
533 iemAImpl_rol_u32,
534 iemAImpl_rol_u64
535};
536
537/** Function table for the ROR instruction. */
538IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
539{
540 iemAImpl_ror_u8,
541 iemAImpl_ror_u16,
542 iemAImpl_ror_u32,
543 iemAImpl_ror_u64
544};
545
546/** Function table for the RCL instruction. */
547IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
548{
549 iemAImpl_rcl_u8,
550 iemAImpl_rcl_u16,
551 iemAImpl_rcl_u32,
552 iemAImpl_rcl_u64
553};
554
555/** Function table for the RCR instruction. */
556IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
557{
558 iemAImpl_rcr_u8,
559 iemAImpl_rcr_u16,
560 iemAImpl_rcr_u32,
561 iemAImpl_rcr_u64
562};
563
564/** Function table for the SHL instruction. */
565IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
566{
567 iemAImpl_shl_u8,
568 iemAImpl_shl_u16,
569 iemAImpl_shl_u32,
570 iemAImpl_shl_u64
571};
572
573/** Function table for the SHR instruction. */
574IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
575{
576 iemAImpl_shr_u8,
577 iemAImpl_shr_u16,
578 iemAImpl_shr_u32,
579 iemAImpl_shr_u64
580};
581
582/** Function table for the SAR instruction. */
583IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
584{
585 iemAImpl_sar_u8,
586 iemAImpl_sar_u16,
587 iemAImpl_sar_u32,
588 iemAImpl_sar_u64
589};
590
591
592/** Function table for the MUL instruction. */
593IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
594{
595 iemAImpl_mul_u8,
596 iemAImpl_mul_u16,
597 iemAImpl_mul_u32,
598 iemAImpl_mul_u64
599};
600
601/** Function table for the IMUL instruction working implicitly on rAX. */
602IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
603{
604 iemAImpl_imul_u8,
605 iemAImpl_imul_u16,
606 iemAImpl_imul_u32,
607 iemAImpl_imul_u64
608};
609
610/** Function table for the DIV instruction. */
611IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
612{
613 iemAImpl_div_u8,
614 iemAImpl_div_u16,
615 iemAImpl_div_u32,
616 iemAImpl_div_u64
617};
618
619/** Function table for the IDIV instruction. */
620IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
621{
622 iemAImpl_idiv_u8,
623 iemAImpl_idiv_u16,
624 iemAImpl_idiv_u32,
625 iemAImpl_idiv_u64
626};
627
628/** Function table for the SHLD instruction */
629IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
630{
631 iemAImpl_shld_u16,
632 iemAImpl_shld_u32,
633 iemAImpl_shld_u64,
634};
635
636/** Function table for the SHRD instruction */
637IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
638{
639 iemAImpl_shrd_u16,
640 iemAImpl_shrd_u32,
641 iemAImpl_shrd_u64,
642};
643
644
645/** Function table for the PUNPCKLBW instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
647/** Function table for the PUNPCKLWD instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
649/** Function table for the PUNPCKLDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
651/** Function table for the PUNPCKLQDQ instruction */
652IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
653
654/** Function table for the PUNPCKHBW instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
656/** Function table for the PUNPCKHWD instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
658/** Function table for the PUNPCKHDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
660/** Function table for the PUNPCKHQDQ instruction */
661IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
662
663/** Function table for the PXOR instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
665/** Function table for the PCMPEQB instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
667/** Function table for the PCMPEQW instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
669/** Function table for the PCMPEQD instruction */
670IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
671
672
673#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
674/** What IEM just wrote. */
675uint8_t g_abIemWrote[256];
676/** How much IEM just wrote. */
677size_t g_cbIemWrote;
678#endif
679
680
681/*********************************************************************************************************************************
682* Internal Functions *
683*********************************************************************************************************************************/
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
686IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
687IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
689IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
692IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
694IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
695IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
698IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
699IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
700IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
701IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
702IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
703IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
709IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
710IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
713IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
714IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
715IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
716IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
717
718#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
719IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
720#endif
721IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
722IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
723
724
725
726/**
727 * Sets the pass up status.
728 *
729 * @returns VINF_SUCCESS.
730 * @param pIemCpu The per CPU IEM state of the calling thread.
731 * @param rcPassUp The pass up status. Must be informational.
732 * VINF_SUCCESS is not allowed.
733 */
734IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
735{
736 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
737
738 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
739 if (rcOldPassUp == VINF_SUCCESS)
740 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
741 /* If both are EM scheduling codes, use EM priority rules. */
742 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
743 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
744 {
745 if (rcPassUp < rcOldPassUp)
746 {
747 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
748 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
749 }
750 else
751 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
752 }
753 /* Override EM scheduling with specific status code. */
754 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
755 {
756 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
757 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
758 }
759 /* Don't override specific status code, first come first served. */
760 else
761 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
762 return VINF_SUCCESS;
763}
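/* Illustrative note (not from the original file): callers typically remember a
 * partially successful PGM/IOM status and keep executing, e.g.:
 *
 *      rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);   // returns VINF_SUCCESS
 *
 * The remembered status is then merged into the final result when IEM returns
 * to its caller.
 */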
764
765
766/**
767 * Calculates the CPU mode.
768 *
769 * This is mainly for updating IEMCPU::enmCpuMode.
770 *
771 * @returns CPU mode.
772 * @param pCtx The register context for the CPU.
773 */
774DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
775{
776 if (CPUMIsGuestIn64BitCodeEx(pCtx))
777 return IEMMODE_64BIT;
778 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
779 return IEMMODE_32BIT;
780 return IEMMODE_16BIT;
781}
782
783
784/**
785 * Initializes the execution state.
786 *
787 * @param pIemCpu The per CPU IEM state.
788 * @param fBypassHandlers Whether to bypass access handlers.
789 *
790 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
791 * side-effects in strict builds.
792 */
793DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
794{
795 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
796 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
797
798 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
799 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
800
801#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
808 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
809 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
810#endif
811
812#ifdef VBOX_WITH_RAW_MODE_NOT_R0
813 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
814#endif
815 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
816 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
817#ifdef VBOX_STRICT
818 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
819 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
820 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
821 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
822 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
823 pIemCpu->uRexReg = 127;
824 pIemCpu->uRexB = 127;
825 pIemCpu->uRexIndex = 127;
826 pIemCpu->iEffSeg = 127;
827 pIemCpu->offOpcode = 127;
828 pIemCpu->cbOpcode = 127;
829#endif
830
831 pIemCpu->cActiveMappings = 0;
832 pIemCpu->iNextMapping = 0;
833 pIemCpu->rcPassUp = VINF_SUCCESS;
834 pIemCpu->fBypassHandlers = fBypassHandlers;
835#ifdef VBOX_WITH_RAW_MODE_NOT_R0
836 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
837 && pCtx->cs.u64Base == 0
838 && pCtx->cs.u32Limit == UINT32_MAX
839 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
840 if (!pIemCpu->fInPatchCode)
841 CPUMRawLeave(pVCpu, VINF_SUCCESS);
842#endif
843
844#ifdef IEM_VERIFICATION_MODE_FULL
845 pIemCpu->fNoRemSavedByExec = pIemCpu->fNoRem;
846 pIemCpu->fNoRem = true;
847#endif
848}
849
850
851/**
852 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
853 *
854 * @param pIemCpu The per CPU IEM state.
855 */
856DECLINLINE(void) iemUninitExec(PIEMCPU pIemCpu)
857{
858#ifdef IEM_VERIFICATION_MODE_FULL
859 pIemCpu->fNoRem = pIemCpu->fNoRemSavedByExec;
860#endif
861#ifdef VBOX_STRICT
862 pIemCpu->cbOpcode = 0;
863#else
864 NOREF(pIemCpu);
865#endif
866}
867
868
869/**
870 * Initializes the decoder state.
871 *
872 * @param pIemCpu The per CPU IEM state.
873 * @param fBypassHandlers Whether to bypass access handlers.
874 */
875DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
876{
877 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
878 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
879
880 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
881 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
882
883#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
885 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
886 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
887 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
888 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
889 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
890 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
891 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
892#endif
893
894#ifdef VBOX_WITH_RAW_MODE_NOT_R0
895 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
896#endif
897 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
898#ifdef IEM_VERIFICATION_MODE_FULL
899 if (pIemCpu->uInjectCpl != UINT8_MAX)
900 pIemCpu->uCpl = pIemCpu->uInjectCpl;
901#endif
902 IEMMODE enmMode = iemCalcCpuMode(pCtx);
903 pIemCpu->enmCpuMode = enmMode;
904 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
905 pIemCpu->enmEffAddrMode = enmMode;
906 if (enmMode != IEMMODE_64BIT)
907 {
908 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
909 pIemCpu->enmEffOpSize = enmMode;
910 }
911 else
912 {
913 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
914 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
915 }
916 pIemCpu->fPrefixes = 0;
917 pIemCpu->uRexReg = 0;
918 pIemCpu->uRexB = 0;
919 pIemCpu->uRexIndex = 0;
920 pIemCpu->iEffSeg = X86_SREG_DS;
921 pIemCpu->offOpcode = 0;
922 pIemCpu->cbOpcode = 0;
923 pIemCpu->cActiveMappings = 0;
924 pIemCpu->iNextMapping = 0;
925 pIemCpu->rcPassUp = VINF_SUCCESS;
926 pIemCpu->fBypassHandlers = fBypassHandlers;
927#ifdef VBOX_WITH_RAW_MODE_NOT_R0
928 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
929 && pCtx->cs.u64Base == 0
930 && pCtx->cs.u32Limit == UINT32_MAX
931 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
932 if (!pIemCpu->fInPatchCode)
933 CPUMRawLeave(pVCpu, VINF_SUCCESS);
934#endif
935
936#ifdef DBGFTRACE_ENABLED
937 switch (enmMode)
938 {
939 case IEMMODE_64BIT:
940 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
941 break;
942 case IEMMODE_32BIT:
943 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
944 break;
945 case IEMMODE_16BIT:
946 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
947 break;
948 }
949#endif
950}
951
952
953/**
954 * Prefetches opcodes the first time when execution is started.
955 *
956 * @returns Strict VBox status code.
957 * @param pIemCpu The IEM state.
958 * @param fBypassHandlers Whether to bypass access handlers.
959 */
960IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
961{
962#ifdef IEM_VERIFICATION_MODE_FULL
963 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
964#endif
965 iemInitDecoder(pIemCpu, fBypassHandlers);
966
967 /*
968 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
969 *
970 * First translate CS:rIP to a physical address.
971 */
972 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
973 uint32_t cbToTryRead;
974 RTGCPTR GCPtrPC;
975 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
976 {
977 cbToTryRead = PAGE_SIZE;
978 GCPtrPC = pCtx->rip;
979 if (!IEM_IS_CANONICAL(GCPtrPC))
980 return iemRaiseGeneralProtectionFault0(pIemCpu);
981 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
982 }
983 else
984 {
985 uint32_t GCPtrPC32 = pCtx->eip;
986 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
987 if (GCPtrPC32 > pCtx->cs.u32Limit)
988 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
989 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
990 if (!cbToTryRead) /* overflowed */
991 {
992 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
993 cbToTryRead = UINT32_MAX;
994 }
995 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
996 Assert(GCPtrPC <= UINT32_MAX);
997 }
998
999#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1000 /* Allow interpretation of patch manager code blocks since they can for
1001 instance throw #PFs for perfectly good reasons. */
1002 if (pIemCpu->fInPatchCode)
1003 {
1004 size_t cbRead = 0;
1005 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
1006 AssertRCReturn(rc, rc);
1007 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1008 return VINF_SUCCESS;
1009 }
1010#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1011
1012 RTGCPHYS GCPhys;
1013 uint64_t fFlags;
1014 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
1015 if (RT_FAILURE(rc))
1016 {
1017 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1018 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1019 }
1020 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1021 {
1022 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1023 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1024 }
1025 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1026 {
1027 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1028 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1029 }
1030 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1031 /** @todo Check reserved bits and such stuff. PGM is better at doing
1032 * that, so do it when implementing the guest virtual address
1033 * TLB... */
1034
1035#ifdef IEM_VERIFICATION_MODE_FULL
1036 /*
1037 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1038 * instruction.
1039 */
1040 /** @todo optimize this differently by not using PGMPhysRead. */
1041 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1042 pIemCpu->GCPhysOpcodes = GCPhys;
1043 if ( offPrevOpcodes < cbOldOpcodes
1044 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1045 {
1046 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1047 Assert(cbNew <= RT_ELEMENTS(pIemCpu->abOpcode));
1048 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1049 pIemCpu->cbOpcode = cbNew;
1050 return VINF_SUCCESS;
1051 }
1052#endif
1053
1054 /*
1055 * Read the bytes at this address.
1056 */
1057 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1058#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1059 size_t cbActual;
1060 if ( PATMIsEnabled(pVM)
1061 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1062 {
1063 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1064 Assert(cbActual > 0);
1065 pIemCpu->cbOpcode = (uint8_t)cbActual;
1066 }
1067 else
1068#endif
1069 {
1070 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1071 if (cbToTryRead > cbLeftOnPage)
1072 cbToTryRead = cbLeftOnPage;
1073 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1074 cbToTryRead = sizeof(pIemCpu->abOpcode);
1075
1076 if (!pIemCpu->fBypassHandlers)
1077 {
1078 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1079 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1080 { /* likely */ }
1081 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1082 {
1083 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1084 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1085 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1086 }
1087 else
1088 {
1089 Log((RT_SUCCESS(rcStrict)
1090 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1091 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1092 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1093 return rcStrict;
1094 }
1095 }
1096 else
1097 {
1098 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1099 if (RT_SUCCESS(rc))
1100 { /* likely */ }
1101 else
1102 {
1103 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1104 GCPtrPC, GCPhys, cbToTryRead, rc));
1105 return rc;
1106 }
1107 }
1108 pIemCpu->cbOpcode = cbToTryRead;
1109 }
1110
1111 return VINF_SUCCESS;
1112}
1113
1114
1115/**
1116 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1117 * exception if it fails.
1118 *
1119 * @returns Strict VBox status code.
1120 * @param pIemCpu The IEM state.
1121 * @param cbMin The minimum number of bytes relative to offOpcode
1122 * that must be read.
1123 */
1124IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1125{
1126 /*
1127 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1128 *
1129 * First translate CS:rIP to a physical address.
1130 */
1131 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1132 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1133 uint32_t cbToTryRead;
1134 RTGCPTR GCPtrNext;
1135 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1136 {
1137 cbToTryRead = PAGE_SIZE;
1138 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1139 if (!IEM_IS_CANONICAL(GCPtrNext))
1140 return iemRaiseGeneralProtectionFault0(pIemCpu);
1141 }
1142 else
1143 {
1144 uint32_t GCPtrNext32 = pCtx->eip;
1145 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1146 GCPtrNext32 += pIemCpu->cbOpcode;
1147 if (GCPtrNext32 > pCtx->cs.u32Limit)
1148 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1149 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1150 if (!cbToTryRead) /* overflowed */
1151 {
1152 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1153 cbToTryRead = UINT32_MAX;
1154 /** @todo check out wrapping around the code segment. */
1155 }
1156 if (cbToTryRead < cbMin - cbLeft)
1157 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1158 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1159 }
1160
1161 /* Only read up to the end of the page, and make sure we don't read more
1162 than the opcode buffer can hold. */
1163 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1164 if (cbToTryRead > cbLeftOnPage)
1165 cbToTryRead = cbLeftOnPage;
1166 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1167 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1168/** @todo r=bird: Convert assertion into undefined opcode exception? */
1169 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1170
1171#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1172 /* Allow interpretation of patch manager code blocks since they can for
1173 instance throw #PFs for perfectly good reasons. */
1174 if (pIemCpu->fInPatchCode)
1175 {
1176 size_t cbRead = 0;
1177 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1178 AssertRCReturn(rc, rc);
1179 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1180 return VINF_SUCCESS;
1181 }
1182#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1183
1184 RTGCPHYS GCPhys;
1185 uint64_t fFlags;
1186 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1187 if (RT_FAILURE(rc))
1188 {
1189 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1190 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1191 }
1192 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1193 {
1194 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1195 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1196 }
1197 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1198 {
1199 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1200 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1201 }
1202 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1203 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1204 /** @todo Check reserved bits and such stuff. PGM is better at doing
1205 * that, so do it when implementing the guest virtual address
1206 * TLB... */
1207
1208 /*
1209 * Read the bytes at this address.
1210 *
1211 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1212 * and since PATM should only patch the start of an instruction there
1213 * should be no need to check again here.
1214 */
1215 if (!pIemCpu->fBypassHandlers)
1216 {
1217 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1218 cbToTryRead, PGMACCESSORIGIN_IEM);
1219 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1220 { /* likely */ }
1221 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1222 {
1223 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1224 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1225 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1226 }
1227 else
1228 {
1229 Log((RT_SUCCESS(rcStrict)
1230 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1231 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1232 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1233 return rcStrict;
1234 }
1235 }
1236 else
1237 {
1238 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1239 if (RT_SUCCESS(rc))
1240 { /* likely */ }
1241 else
1242 {
1243 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1244 return rc;
1245 }
1246 }
1247 pIemCpu->cbOpcode += cbToTryRead;
1248 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1249
1250 return VINF_SUCCESS;
1251}
1252
1253
1254/**
1255 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1256 *
1257 * @returns Strict VBox status code.
1258 * @param pIemCpu The IEM state.
1259 * @param pb Where to return the opcode byte.
1260 */
1261DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1262{
1263 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1264 if (rcStrict == VINF_SUCCESS)
1265 {
1266 uint8_t offOpcode = pIemCpu->offOpcode;
1267 *pb = pIemCpu->abOpcode[offOpcode];
1268 pIemCpu->offOpcode = offOpcode + 1;
1269 }
1270 else
1271 *pb = 0;
1272 return rcStrict;
1273}
1274
1275
1276/**
1277 * Fetches the next opcode byte.
1278 *
1279 * @returns Strict VBox status code.
1280 * @param pIemCpu The IEM state.
1281 * @param pu8 Where to return the opcode byte.
1282 */
1283DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1284{
1285 uint8_t const offOpcode = pIemCpu->offOpcode;
1286 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1287 {
1288 *pu8 = pIemCpu->abOpcode[offOpcode];
1289 pIemCpu->offOpcode = offOpcode + 1;
1290 return VINF_SUCCESS;
1291 }
1292 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1293}
1294
1295
1296/**
1297 * Fetches the next opcode byte, returns automatically on failure.
1298 *
1299 * @param a_pu8 Where to return the opcode byte.
1300 * @remark Implicitly references pIemCpu.
1301 */
1302#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1303 do \
1304 { \
1305 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1306 if (rcStrict2 != VINF_SUCCESS) \
1307 return rcStrict2; \
1308 } while (0)
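/* Illustrative sketch only (not part of the original file): these fetch macros
 * are used inside FNIEMOP_DEF style decoder bodies, where pIemCpu is in scope
 * and an early return is safe, e.g. (hypothetical decoder):
 *
 *      FNIEMOP_DEF(iemOp_Example_Ib)
 *      {
 *          uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);   // returns the status on fetch failure
 *          ...
 *          return VINF_SUCCESS;
 *      }
 */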
1309
1310
1311/**
1312 * Fetches the next signed byte from the opcode stream.
1313 *
1314 * @returns Strict VBox status code.
1315 * @param pIemCpu The IEM state.
1316 * @param pi8 Where to return the signed byte.
1317 */
1318DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1319{
1320 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1321}
1322
1323
1324/**
1325 * Fetches the next signed byte from the opcode stream, returning automatically
1326 * on failure.
1327 *
1328 * @param a_pi8 Where to return the signed byte.
1329 * @remark Implicitly references pIemCpu.
1330 */
1331#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1332 do \
1333 { \
1334 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1335 if (rcStrict2 != VINF_SUCCESS) \
1336 return rcStrict2; \
1337 } while (0)
1338
1339
1340/**
1341 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1342 *
1343 * @returns Strict VBox status code.
1344 * @param pIemCpu The IEM state.
1345 * @param pu16 Where to return the opcode word.
1346 */
1347DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1348{
1349 uint8_t u8;
1350 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1351 if (rcStrict == VINF_SUCCESS)
1352 *pu16 = (int8_t)u8;
1353 return rcStrict;
1354}
1355
1356
1357/**
1358 * Fetches the next signed byte from the opcode stream, extending it to
1359 * unsigned 16-bit.
1360 *
1361 * @returns Strict VBox status code.
1362 * @param pIemCpu The IEM state.
1363 * @param pu16 Where to return the unsigned word.
1364 */
1365DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1366{
1367 uint8_t const offOpcode = pIemCpu->offOpcode;
1368 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1369 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1370
1371 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1372 pIemCpu->offOpcode = offOpcode + 1;
1373 return VINF_SUCCESS;
1374}
1375
1376
1377/**
1378 * Fetches the next signed byte from the opcode stream and sign-extends it to
1379 * a word, returning automatically on failure.
1380 *
1381 * @param a_pu16 Where to return the word.
1382 * @remark Implicitly references pIemCpu.
1383 */
1384#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1385 do \
1386 { \
1387 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1388 if (rcStrict2 != VINF_SUCCESS) \
1389 return rcStrict2; \
1390 } while (0)
1391
1392
1393/**
1394 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1395 *
1396 * @returns Strict VBox status code.
1397 * @param pIemCpu The IEM state.
1398 * @param pu32 Where to return the opcode dword.
1399 */
1400DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1401{
1402 uint8_t u8;
1403 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1404 if (rcStrict == VINF_SUCCESS)
1405 *pu32 = (int8_t)u8;
1406 return rcStrict;
1407}
1408
1409
1410/**
1411 * Fetches the next signed byte from the opcode stream, extending it to
1412 * unsigned 32-bit.
1413 *
1414 * @returns Strict VBox status code.
1415 * @param pIemCpu The IEM state.
1416 * @param pu32 Where to return the unsigned dword.
1417 */
1418DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1419{
1420 uint8_t const offOpcode = pIemCpu->offOpcode;
1421 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1422 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1423
1424 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1425 pIemCpu->offOpcode = offOpcode + 1;
1426 return VINF_SUCCESS;
1427}
1428
1429
1430/**
1431 * Fetches the next signed byte from the opcode stream and sign-extends it to
1432 * a double word, returning automatically on failure.
1433 *
1434 * @param a_pu32 Where to return the double word.
1435 * @remark Implicitly references pIemCpu.
1436 */
1437#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1438 do \
1439 { \
1440 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1441 if (rcStrict2 != VINF_SUCCESS) \
1442 return rcStrict2; \
1443 } while (0)
1444
1445
1446/**
1447 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1448 *
1449 * @returns Strict VBox status code.
1450 * @param pIemCpu The IEM state.
1451 * @param pu64 Where to return the opcode qword.
1452 */
1453DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1454{
1455 uint8_t u8;
1456 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1457 if (rcStrict == VINF_SUCCESS)
1458 *pu64 = (int8_t)u8;
1459 return rcStrict;
1460}
1461
1462
1463/**
1464 * Fetches the next signed byte from the opcode stream, extending it to
1465 * unsigned 64-bit.
1466 *
1467 * @returns Strict VBox status code.
1468 * @param pIemCpu The IEM state.
1469 * @param pu64 Where to return the unsigned qword.
1470 */
1471DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1472{
1473 uint8_t const offOpcode = pIemCpu->offOpcode;
1474 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1475 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1476
1477 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1478 pIemCpu->offOpcode = offOpcode + 1;
1479 return VINF_SUCCESS;
1480}
1481
1482
1483/**
1484 * Fetches the next signed byte from the opcode stream and sign-extends it to
1485 * a quad word, returning automatically on failure.
1486 *
1487 * @param a_pu64 Where to return the quad word.
1488 * @remark Implicitly references pIemCpu.
1489 */
1490#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1491 do \
1492 { \
1493 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1494 if (rcStrict2 != VINF_SUCCESS) \
1495 return rcStrict2; \
1496 } while (0)
1497
1498
1499/**
1500 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1501 *
1502 * @returns Strict VBox status code.
1503 * @param pIemCpu The IEM state.
1504 * @param pu16 Where to return the opcode word.
1505 */
1506DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1507{
1508 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1509 if (rcStrict == VINF_SUCCESS)
1510 {
1511 uint8_t offOpcode = pIemCpu->offOpcode;
1512 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1513 pIemCpu->offOpcode = offOpcode + 2;
1514 }
1515 else
1516 *pu16 = 0;
1517 return rcStrict;
1518}
1519
1520
1521/**
1522 * Fetches the next opcode word.
1523 *
1524 * @returns Strict VBox status code.
1525 * @param pIemCpu The IEM state.
1526 * @param pu16 Where to return the opcode word.
1527 */
1528DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1529{
1530 uint8_t const offOpcode = pIemCpu->offOpcode;
1531 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1532 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1533
1534 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1535 pIemCpu->offOpcode = offOpcode + 2;
1536 return VINF_SUCCESS;
1537}
1538
1539
1540/**
1541 * Fetches the next opcode word, returns automatically on failure.
1542 *
1543 * @param a_pu16 Where to return the opcode word.
1544 * @remark Implicitly references pIemCpu.
1545 */
1546#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1547 do \
1548 { \
1549 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1550 if (rcStrict2 != VINF_SUCCESS) \
1551 return rcStrict2; \
1552 } while (0)
1553
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1557 *
1558 * @returns Strict VBox status code.
1559 * @param pIemCpu The IEM state.
1560 * @param pu32 Where to return the opcode double word.
1561 */
1562DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1563{
1564 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1565 if (rcStrict == VINF_SUCCESS)
1566 {
1567 uint8_t offOpcode = pIemCpu->offOpcode;
1568 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1569 pIemCpu->offOpcode = offOpcode + 2;
1570 }
1571 else
1572 *pu32 = 0;
1573 return rcStrict;
1574}
1575
1576
1577/**
1578 * Fetches the next opcode word, zero extending it to a double word.
1579 *
1580 * @returns Strict VBox status code.
1581 * @param pIemCpu The IEM state.
1582 * @param pu32 Where to return the opcode double word.
1583 */
1584DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1585{
1586 uint8_t const offOpcode = pIemCpu->offOpcode;
1587 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1588 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1589
1590 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1591 pIemCpu->offOpcode = offOpcode + 2;
1592 return VINF_SUCCESS;
1593}
1594
1595
1596/**
1597 * Fetches the next opcode word and zero extends it to a double word, returns
1598 * automatically on failure.
1599 *
1600 * @param a_pu32 Where to return the opcode double word.
1601 * @remark Implicitly references pIemCpu.
1602 */
1603#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1604 do \
1605 { \
1606 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1607 if (rcStrict2 != VINF_SUCCESS) \
1608 return rcStrict2; \
1609 } while (0)
1610
1611
1612/**
1613 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1614 *
1615 * @returns Strict VBox status code.
1616 * @param pIemCpu The IEM state.
1617 * @param pu64 Where to return the opcode quad word.
1618 */
1619DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1620{
1621 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1622 if (rcStrict == VINF_SUCCESS)
1623 {
1624 uint8_t offOpcode = pIemCpu->offOpcode;
1625 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1626 pIemCpu->offOpcode = offOpcode + 2;
1627 }
1628 else
1629 *pu64 = 0;
1630 return rcStrict;
1631}
1632
1633
1634/**
1635 * Fetches the next opcode word, zero extending it to a quad word.
1636 *
1637 * @returns Strict VBox status code.
1638 * @param pIemCpu The IEM state.
1639 * @param pu64 Where to return the opcode quad word.
1640 */
1641DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1642{
1643 uint8_t const offOpcode = pIemCpu->offOpcode;
1644 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1645 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1646
1647 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1648 pIemCpu->offOpcode = offOpcode + 2;
1649 return VINF_SUCCESS;
1650}
1651
1652
1653/**
1654 * Fetches the next opcode word and zero extends it to a quad word, returns
1655 * automatically on failure.
1656 *
1657 * @param a_pu64 Where to return the opcode quad word.
1658 * @remark Implicitly references pIemCpu.
1659 */
1660#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1661 do \
1662 { \
1663 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1664 if (rcStrict2 != VINF_SUCCESS) \
1665 return rcStrict2; \
1666 } while (0)
1667
1668
1669/**
1670 * Fetches the next signed word from the opcode stream.
1671 *
1672 * @returns Strict VBox status code.
1673 * @param pIemCpu The IEM state.
1674 * @param pi16 Where to return the signed word.
1675 */
1676DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1677{
1678 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1679}
1680
1681
1682/**
1683 * Fetches the next signed word from the opcode stream, returning automatically
1684 * on failure.
1685 *
1686 * @param a_pi16 Where to return the signed word.
1687 * @remark Implicitly references pIemCpu.
1688 */
1689#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1690 do \
1691 { \
1692 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1693 if (rcStrict2 != VINF_SUCCESS) \
1694 return rcStrict2; \
1695 } while (0)
1696
1697
1698/**
1699 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1700 *
1701 * @returns Strict VBox status code.
1702 * @param pIemCpu The IEM state.
1703 * @param pu32 Where to return the opcode dword.
1704 */
1705DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1706{
1707 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1708 if (rcStrict == VINF_SUCCESS)
1709 {
1710 uint8_t offOpcode = pIemCpu->offOpcode;
1711 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1712 pIemCpu->abOpcode[offOpcode + 1],
1713 pIemCpu->abOpcode[offOpcode + 2],
1714 pIemCpu->abOpcode[offOpcode + 3]);
1715 pIemCpu->offOpcode = offOpcode + 4;
1716 }
1717 else
1718 *pu32 = 0;
1719 return rcStrict;
1720}
1721
1722
1723/**
1724 * Fetches the next opcode dword.
1725 *
1726 * @returns Strict VBox status code.
1727 * @param pIemCpu The IEM state.
1728 * @param pu32 Where to return the opcode double word.
1729 */
1730DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1731{
1732 uint8_t const offOpcode = pIemCpu->offOpcode;
1733 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1734 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1735
1736 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1737 pIemCpu->abOpcode[offOpcode + 1],
1738 pIemCpu->abOpcode[offOpcode + 2],
1739 pIemCpu->abOpcode[offOpcode + 3]);
1740 pIemCpu->offOpcode = offOpcode + 4;
1741 return VINF_SUCCESS;
1742}
1743
1744
1745/**
1746 * Fetches the next opcode dword, returns automatically on failure.
1747 *
1748 * @param a_pu32 Where to return the opcode dword.
1749 * @remark Implicitly references pIemCpu.
1750 */
1751#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1752 do \
1753 { \
1754 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1755 if (rcStrict2 != VINF_SUCCESS) \
1756 return rcStrict2; \
1757 } while (0)
1758
1759
1760/**
1761 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1762 *
1763 * @returns Strict VBox status code.
1764 * @param pIemCpu The IEM state.
1765 * @param pu64 Where to return the opcode dword.
1766 */
1767DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1768{
1769 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1770 if (rcStrict == VINF_SUCCESS)
1771 {
1772 uint8_t offOpcode = pIemCpu->offOpcode;
1773 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1774 pIemCpu->abOpcode[offOpcode + 1],
1775 pIemCpu->abOpcode[offOpcode + 2],
1776 pIemCpu->abOpcode[offOpcode + 3]);
1777 pIemCpu->offOpcode = offOpcode + 4;
1778 }
1779 else
1780 *pu64 = 0;
1781 return rcStrict;
1782}
1783
1784
1785/**
1786 * Fetches the next opcode dword, zero extending it to a quad word.
1787 *
1788 * @returns Strict VBox status code.
1789 * @param pIemCpu The IEM state.
1790 * @param pu64 Where to return the opcode quad word.
1791 */
1792DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1793{
1794 uint8_t const offOpcode = pIemCpu->offOpcode;
1795 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1796 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1797
1798 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1799 pIemCpu->abOpcode[offOpcode + 1],
1800 pIemCpu->abOpcode[offOpcode + 2],
1801 pIemCpu->abOpcode[offOpcode + 3]);
1802 pIemCpu->offOpcode = offOpcode + 4;
1803 return VINF_SUCCESS;
1804}
1805
1806
1807/**
1808 * Fetches the next opcode dword and zero extends it to a quad word, returns
1809 * automatically on failure.
1810 *
1811 * @param a_pu64 Where to return the opcode quad word.
1812 * @remark Implicitly references pIemCpu.
1813 */
1814#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1815 do \
1816 { \
1817 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1818 if (rcStrict2 != VINF_SUCCESS) \
1819 return rcStrict2; \
1820 } while (0)
1821
1822
1823/**
1824 * Fetches the next signed double word from the opcode stream.
1825 *
1826 * @returns Strict VBox status code.
1827 * @param pIemCpu The IEM state.
1828 * @param pi32 Where to return the signed double word.
1829 */
1830DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1831{
1832 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1833}
1834
1835/**
1836 * Fetches the next signed double word from the opcode stream, returning
1837 * automatically on failure.
1838 *
1839 * @param a_pi32 Where to return the signed double word.
1840 * @remark Implicitly references pIemCpu.
1841 */
1842#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1843 do \
1844 { \
1845 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1846 if (rcStrict2 != VINF_SUCCESS) \
1847 return rcStrict2; \
1848 } while (0)
1849
1850
1851/**
1852 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1853 *
1854 * @returns Strict VBox status code.
1855 * @param pIemCpu The IEM state.
1856 * @param pu64 Where to return the opcode qword.
1857 */
1858DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1859{
1860 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1861 if (rcStrict == VINF_SUCCESS)
1862 {
1863 uint8_t offOpcode = pIemCpu->offOpcode;
1864 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1865 pIemCpu->abOpcode[offOpcode + 1],
1866 pIemCpu->abOpcode[offOpcode + 2],
1867 pIemCpu->abOpcode[offOpcode + 3]);
1868 pIemCpu->offOpcode = offOpcode + 4;
1869 }
1870 else
1871 *pu64 = 0;
1872 return rcStrict;
1873}
1874
1875
1876/**
1877 * Fetches the next opcode dword, sign extending it into a quad word.
1878 *
1879 * @returns Strict VBox status code.
1880 * @param pIemCpu The IEM state.
1881 * @param pu64 Where to return the opcode quad word.
1882 */
1883DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1884{
1885 uint8_t const offOpcode = pIemCpu->offOpcode;
1886 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1887 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1888
1889 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1890 pIemCpu->abOpcode[offOpcode + 1],
1891 pIemCpu->abOpcode[offOpcode + 2],
1892 pIemCpu->abOpcode[offOpcode + 3]);
1893 *pu64 = i32;
1894 pIemCpu->offOpcode = offOpcode + 4;
1895 return VINF_SUCCESS;
1896}
1897
1898
1899/**
1900 * Fetches the next opcode double word and sign extends it to a quad word,
1901 * returns automatically on failure.
1902 *
1903 * @param a_pu64 Where to return the opcode quad word.
1904 * @remark Implicitly references pIemCpu.
1905 */
1906#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1907 do \
1908 { \
1909 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1910 if (rcStrict2 != VINF_SUCCESS) \
1911 return rcStrict2; \
1912 } while (0)
1913
1914
1915/**
1916 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1917 *
1918 * @returns Strict VBox status code.
1919 * @param pIemCpu The IEM state.
1920 * @param pu64 Where to return the opcode qword.
1921 */
1922DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1923{
1924 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1925 if (rcStrict == VINF_SUCCESS)
1926 {
1927 uint8_t offOpcode = pIemCpu->offOpcode;
1928 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1929 pIemCpu->abOpcode[offOpcode + 1],
1930 pIemCpu->abOpcode[offOpcode + 2],
1931 pIemCpu->abOpcode[offOpcode + 3],
1932 pIemCpu->abOpcode[offOpcode + 4],
1933 pIemCpu->abOpcode[offOpcode + 5],
1934 pIemCpu->abOpcode[offOpcode + 6],
1935 pIemCpu->abOpcode[offOpcode + 7]);
1936 pIemCpu->offOpcode = offOpcode + 8;
1937 }
1938 else
1939 *pu64 = 0;
1940 return rcStrict;
1941}
1942
1943
1944/**
1945 * Fetches the next opcode qword.
1946 *
1947 * @returns Strict VBox status code.
1948 * @param pIemCpu The IEM state.
1949 * @param pu64 Where to return the opcode qword.
1950 */
1951DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1952{
1953 uint8_t const offOpcode = pIemCpu->offOpcode;
1954 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1955 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1956
1957 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1958 pIemCpu->abOpcode[offOpcode + 1],
1959 pIemCpu->abOpcode[offOpcode + 2],
1960 pIemCpu->abOpcode[offOpcode + 3],
1961 pIemCpu->abOpcode[offOpcode + 4],
1962 pIemCpu->abOpcode[offOpcode + 5],
1963 pIemCpu->abOpcode[offOpcode + 6],
1964 pIemCpu->abOpcode[offOpcode + 7]);
1965 pIemCpu->offOpcode = offOpcode + 8;
1966 return VINF_SUCCESS;
1967}
1968
1969
1970/**
1971 * Fetches the next opcode quad word, returns automatically on failure.
1972 *
1973 * @param a_pu64 Where to return the opcode quad word.
1974 * @remark Implicitly references pIemCpu.
1975 */
1976#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1977 do \
1978 { \
1979 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1980 if (rcStrict2 != VINF_SUCCESS) \
1981 return rcStrict2; \
1982 } while (0)
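
/* Illustrative sketch (hypothetical caller): a full 8-byte immediate, e.g. for
   a 'mov reg64, imm64' style encoding, would be fetched like this; the bytes
   are assembled little-endian from abOpcode via RT_MAKE_U64_FROM_U8:
       uint64_t u64Imm;
       IEM_OPCODE_GET_NEXT_U64(&u64Imm);   /* returns to our caller on failure */
 */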
1983
1984
1985/** @name Misc Worker Functions.
1986 * @{
1987 */
1988
1989
1990/**
1991 * Validates a new SS segment.
1992 *
1993 * @returns VBox strict status code.
1994 * @param pIemCpu The IEM per CPU instance data.
1995 * @param pCtx The CPU context.
1996 * @param NewSS The new SS selector.
1997 * @param uCpl The CPL to load the stack for.
1998 * @param pDesc Where to return the descriptor.
1999 */
2000IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
2001{
2002 NOREF(pCtx);
2003
2004 /* Null selectors are not allowed (we're not called for dispatching
2005 interrupts with SS=0 in long mode). */
2006 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2007 {
2008 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2009 return iemRaiseTaskSwitchFault0(pIemCpu);
2010 }
2011
2012 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2013 if ((NewSS & X86_SEL_RPL) != uCpl)
2014 {
2015 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2016 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2017 }
2018
2019 /*
2020 * Read the descriptor.
2021 */
2022 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
2023 if (rcStrict != VINF_SUCCESS)
2024 return rcStrict;
2025
2026 /*
2027 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2028 */
2029 if (!pDesc->Legacy.Gen.u1DescType)
2030 {
2031 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2032 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2033 }
2034
2035 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2036 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2037 {
2038 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2039 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2040 }
2041 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2042 {
2043 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2044 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2045 }
2046
2047 /* Is it there? */
2048 /** @todo testcase: Is this checked before the canonical / limit check below? */
2049 if (!pDesc->Legacy.Gen.u1Present)
2050 {
2051 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2052 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2053 }
2054
2055 return VINF_SUCCESS;
2056}
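
/* Illustrative sketch (hypothetical caller): a far control transfer that
   switches stacks would typically validate and then commit SS roughly like
   this; the surrounding code and the actual commit step are not shown:
       IEMSELDESC DescSS;
       VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;
       ... load DescSS into pCtx->ss ...
 */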
2057
2058
2059/**
2060 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2061 * not.
2062 *
2063 * @param a_pIemCpu The IEM per CPU data.
2064 * @param a_pCtx The CPU context.
2065 */
2066#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2067# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2068 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2069 ? (a_pCtx)->eflags.u \
2070 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2071#else
2072# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2073 ( (a_pCtx)->eflags.u )
2074#endif
2075
2076/**
2077 * Updates the EFLAGS in the correct manner wrt. PATM.
2078 *
2079 * @param a_pIemCpu The IEM per CPU data.
2080 * @param a_pCtx The CPU context.
2081 * @param a_fEfl The new EFLAGS.
2082 */
2083#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2084# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2085 do { \
2086 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2087 (a_pCtx)->eflags.u = (a_fEfl); \
2088 else \
2089 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2090 } while (0)
2091#else
2092# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2093 do { \
2094 (a_pCtx)->eflags.u = (a_fEfl); \
2095 } while (0)
2096#endif
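
/* Illustrative sketch: code that needs to modify the guest flags reads them
   via IEMMISC_GET_EFL, adjusts the value and writes it back via
   IEMMISC_SET_EFL, so the raw-mode/PATM split is handled transparently:
       uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
       fEfl &= ~X86_EFL_IF;                  /* e.g. mask interrupts */
       IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
   (This mirrors what the real-mode exception code further down does.) */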
2097
2098
2099/** @} */
2100
2101/** @name Raising Exceptions.
2102 *
2103 * @{
2104 */
2105
2106/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2107 * @{ */
2108/** CPU exception. */
2109#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2110/** External interrupt (from PIC, APIC, whatever). */
2111#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2112/** Software interrupt (int or into, not bound).
2113 * Returns to the following instruction. */
2114#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2115/** Takes an error code. */
2116#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2117/** Takes a CR2. */
2118#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2119/** Generated by the breakpoint instruction. */
2120#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2121/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2122#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2123/** @} */
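
/* Illustrative note: these flags are combined by the callers of
   iemRaiseXcptOrInt. A page fault, for example, would be raised with
   IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2 plus the
   error code and the faulting address, while an external interrupt would use
   just IEM_XCPT_FLAGS_T_EXT_INT. (Hedged example; the actual call sites live
   elsewhere in this file.) */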
2124
2125
2126/**
2127 * Loads the specified stack far pointer from the TSS.
2128 *
2129 * @returns VBox strict status code.
2130 * @param pIemCpu The IEM per CPU instance data.
2131 * @param pCtx The CPU context.
2132 * @param uCpl The CPL to load the stack for.
2133 * @param pSelSS Where to return the new stack segment.
2134 * @param puEsp Where to return the new stack pointer.
2135 */
2136IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2137 PRTSEL pSelSS, uint32_t *puEsp)
2138{
2139 VBOXSTRICTRC rcStrict;
2140 Assert(uCpl < 4);
2141
2142 switch (pCtx->tr.Attr.n.u4Type)
2143 {
2144 /*
2145 * 16-bit TSS (X86TSS16).
2146 */
2147 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2148 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2149 {
2150 uint32_t off = uCpl * 4 + 2;
2151 if (off + 4 <= pCtx->tr.u32Limit)
2152 {
2153 /** @todo check actual access pattern here. */
2154 uint32_t u32Tmp = 0; /* gcc maybe... */
2155 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2156 if (rcStrict == VINF_SUCCESS)
2157 {
2158 *puEsp = RT_LOWORD(u32Tmp);
2159 *pSelSS = RT_HIWORD(u32Tmp);
2160 return VINF_SUCCESS;
2161 }
2162 }
2163 else
2164 {
2165 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2166 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2167 }
2168 break;
2169 }
2170
2171 /*
2172 * 32-bit TSS (X86TSS32).
2173 */
2174 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2175 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2176 {
2177 uint32_t off = uCpl * 8 + 4;
2178 if (off + 7 <= pCtx->tr.u32Limit)
2179 {
2180/** @todo check actual access pattern here. */
2181 uint64_t u64Tmp;
2182 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2183 if (rcStrict == VINF_SUCCESS)
2184 {
2185 *puEsp = u64Tmp & UINT32_MAX;
2186 *pSelSS = (RTSEL)(u64Tmp >> 32);
2187 return VINF_SUCCESS;
2188 }
2189 }
2190 else
2191 {
2192 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2193 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2194 }
2195 break;
2196 }
2197
2198 default:
2199 AssertFailed();
2200 rcStrict = VERR_IEM_IPE_4;
2201 break;
2202 }
2203
2204 *puEsp = 0; /* make gcc happy */
2205 *pSelSS = 0; /* make gcc happy */
2206 return rcStrict;
2207}
2208
2209
2210/**
2211 * Loads the specified stack pointer from the 64-bit TSS.
2212 *
2213 * @returns VBox strict status code.
2214 * @param pIemCpu The IEM per CPU instance data.
2215 * @param pCtx The CPU context.
2216 * @param uCpl The CPL to load the stack for.
2217 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2218 * @param puRsp Where to return the new stack pointer.
2219 */
2220IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2221{
2222 Assert(uCpl < 4);
2223 Assert(uIst < 8);
2224 *puRsp = 0; /* make gcc happy */
2225
2226 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2227
2228 uint32_t off;
2229 if (uIst)
2230 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2231 else
2232 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2233 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2234 {
2235 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2236 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2237 }
2238
2239 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2240}
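
/* Illustrative note (assuming the canonical X86TSS64 layout with contiguous
   rsp0..rsp2 and ist1..ist7 arrays): uIst == 0 with uCpl == 2 resolves to
   RT_OFFSETOF(X86TSS64, rsp0) + 2 * sizeof(uint64_t), i.e. the rsp2 field,
   while uIst == 3 resolves to the ist3 field. */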
2241
2242
2243/**
2244 * Adjusts the CPU state according to the exception being raised.
2245 *
2246 * @param pCtx The CPU context.
2247 * @param u8Vector The exception that has been raised.
2248 */
2249DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2250{
2251 switch (u8Vector)
2252 {
2253 case X86_XCPT_DB:
2254 pCtx->dr[7] &= ~X86_DR7_GD;
2255 break;
2256 /** @todo Read the AMD and Intel exception reference... */
2257 }
2258}
2259
2260
2261/**
2262 * Implements exceptions and interrupts for real mode.
2263 *
2264 * @returns VBox strict status code.
2265 * @param pIemCpu The IEM per CPU instance data.
2266 * @param pCtx The CPU context.
2267 * @param cbInstr The number of bytes to offset rIP by in the return
2268 * address.
2269 * @param u8Vector The interrupt / exception vector number.
2270 * @param fFlags The flags.
2271 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2272 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2273 */
2274IEM_STATIC VBOXSTRICTRC
2275iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2276 PCPUMCTX pCtx,
2277 uint8_t cbInstr,
2278 uint8_t u8Vector,
2279 uint32_t fFlags,
2280 uint16_t uErr,
2281 uint64_t uCr2)
2282{
2283 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2284 NOREF(uErr); NOREF(uCr2);
2285
2286 /*
2287 * Read the IDT entry.
2288 */
2289 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2290 {
2291 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2292 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2293 }
2294 RTFAR16 Idte;
2295 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2296 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2297 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2298 return rcStrict;
2299
2300 /*
2301 * Push the stack frame.
2302 */
2303 uint16_t *pu16Frame;
2304 uint64_t uNewRsp;
2305 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2306 if (rcStrict != VINF_SUCCESS)
2307 return rcStrict;
2308
2309 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2310#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2311 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2312 if (pIemCpu->uTargetCpu <= IEMTARGETCPU_186)
2313 fEfl |= UINT16_C(0xf000);
2314#endif
2315 pu16Frame[2] = (uint16_t)fEfl;
2316 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2317 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2318 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2319 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2320 return rcStrict;
2321
2322 /*
2323 * Load the vector address into cs:ip and make exception specific state
2324 * adjustments.
2325 */
2326 pCtx->cs.Sel = Idte.sel;
2327 pCtx->cs.ValidSel = Idte.sel;
2328 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2329 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2330 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2331 pCtx->rip = Idte.off;
2332 fEfl &= ~X86_EFL_IF;
2333 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2334
2335 /** @todo do we actually do this in real mode? */
2336 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2337 iemRaiseXcptAdjustState(pCtx, u8Vector);
2338
2339 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2340}
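
/* Illustrative recap of the real-mode frame built above (lowest address
   first): pu16Frame[0] = return IP, [1] = return CS, [2] = FLAGS. The handler
   address comes from the IVT entry at idtr.pIdt + vector * 4, with the low
   word being the offset and the high word the segment. */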
2341
2342
2343/**
2344 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2345 *
2346 * @param pIemCpu The IEM per CPU instance data.
2347 * @param pSReg Pointer to the segment register.
2348 */
2349IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2350{
2351 pSReg->Sel = 0;
2352 pSReg->ValidSel = 0;
2353 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2354 {
2355 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2356 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2357 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2358 }
2359 else
2360 {
2361 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2362 /** @todo check this on AMD-V */
2363 pSReg->u64Base = 0;
2364 pSReg->u32Limit = 0;
2365 }
2366}
2367
2368
2369/**
2370 * Loads a segment selector during a task switch in V8086 mode.
2371 *
2372 * @param pIemCpu The IEM per CPU instance data.
2373 * @param pSReg Pointer to the segment register.
2374 * @param uSel The selector value to load.
2375 */
2376IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2377{
2378 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2379 pSReg->Sel = uSel;
2380 pSReg->ValidSel = uSel;
2381 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2382 pSReg->u64Base = uSel << 4;
2383 pSReg->u32Limit = 0xffff;
2384 pSReg->Attr.u = 0xf3;
2385}
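
/* Illustrative note: the 0xf3 attribute value set above decodes to a present,
   DPL=3, accessed read/write data segment, which together with base =
   selector * 16 and a 64KiB-1 limit models real-8086 style segmentation for
   V8086 mode. */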
2386
2387
2388/**
2389 * Loads a NULL data selector into a selector register, both the hidden and
2390 * visible parts, in protected mode.
2391 *
2392 * @param pIemCpu The IEM state of the calling EMT.
2393 * @param pSReg Pointer to the segment register.
2394 * @param uRpl The RPL.
2395 */
2396IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2397{
2398 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2399 * data selector in protected mode. */
2400 pSReg->Sel = uRpl;
2401 pSReg->ValidSel = uRpl;
2402 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2403 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2404 {
2405 /* VT-x (Intel 3960x) observed doing something like this. */
2406 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2407 pSReg->u32Limit = UINT32_MAX;
2408 pSReg->u64Base = 0;
2409 }
2410 else
2411 {
2412 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2413 pSReg->u32Limit = 0;
2414 pSReg->u64Base = 0;
2415 }
2416}
2417
2418
2419/**
2420 * Loads a segment selector during a task switch in protected mode.
2421 *
2422 * In this task switch scenario, we would throw \#TS exceptions rather than
2423 * \#GPs.
2424 *
2425 * @returns VBox strict status code.
2426 * @param pIemCpu The IEM per CPU instance data.
2427 * @param pSReg Pointer to the segment register.
2428 * @param uSel The new selector value.
2429 *
2430 * @remarks This does _not_ handle CS or SS.
2431 * @remarks This expects pIemCpu->uCpl to be up to date.
2432 */
2433IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2434{
2435 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2436
2437 /* Null data selector. */
2438 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2439 {
2440 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2441 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2442 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2443 return VINF_SUCCESS;
2444 }
2445
2446 /* Fetch the descriptor. */
2447 IEMSELDESC Desc;
2448 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2449 if (rcStrict != VINF_SUCCESS)
2450 {
2451 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2452 VBOXSTRICTRC_VAL(rcStrict)));
2453 return rcStrict;
2454 }
2455
2456 /* Must be a data segment or readable code segment. */
2457 if ( !Desc.Legacy.Gen.u1DescType
2458 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2459 {
2460 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2461 Desc.Legacy.Gen.u4Type));
2462 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2463 }
2464
2465 /* Check privileges for data segments and non-conforming code segments. */
2466 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2467 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2468 {
2469 /* The RPL and the new CPL must be less than or equal to the DPL. */
2470 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2471 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2472 {
2473 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2474 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2475 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2476 }
2477 }
2478
2479 /* Is it there? */
2480 if (!Desc.Legacy.Gen.u1Present)
2481 {
2482 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2483 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2484 }
2485
2486 /* The base and limit. */
2487 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2488 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2489
2490 /*
2491 * Ok, everything checked out fine. Now set the accessed bit before
2492 * committing the result into the registers.
2493 */
2494 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2495 {
2496 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2497 if (rcStrict != VINF_SUCCESS)
2498 return rcStrict;
2499 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2500 }
2501
2502 /* Commit */
2503 pSReg->Sel = uSel;
2504 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2505 pSReg->u32Limit = cbLimit;
2506 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2507 pSReg->ValidSel = uSel;
2508 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2509 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2510 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2511
2512 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2513 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2514 return VINF_SUCCESS;
2515}
2516
2517
2518/**
2519 * Performs a task switch.
2520 *
2521 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2522 * caller is responsible for performing the necessary checks (like DPL, TSS
2523 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2524 * reference for JMP, CALL, IRET.
2525 *
2526 * If the task switch is due to a software interrupt or hardware exception,
2527 * the caller is responsible for validating the TSS selector and descriptor. See
2528 * Intel Instruction reference for INT n.
2529 *
2530 * @returns VBox strict status code.
2531 * @param pIemCpu The IEM per CPU instance data.
2532 * @param pCtx The CPU context.
2533 * @param enmTaskSwitch What caused this task switch.
2534 * @param uNextEip The EIP effective after the task switch.
2535 * @param fFlags The flags.
2536 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2537 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2538 * @param SelTSS The TSS selector of the new task.
2539 * @param pNewDescTSS Pointer to the new TSS descriptor.
2540 */
2541IEM_STATIC VBOXSTRICTRC
2542iemTaskSwitch(PIEMCPU pIemCpu,
2543 PCPUMCTX pCtx,
2544 IEMTASKSWITCH enmTaskSwitch,
2545 uint32_t uNextEip,
2546 uint32_t fFlags,
2547 uint16_t uErr,
2548 uint64_t uCr2,
2549 RTSEL SelTSS,
2550 PIEMSELDESC pNewDescTSS)
2551{
2552 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2553 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2554
2555 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2556 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2557 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2558 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2559 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2560
2561 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2562 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2563
2564 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2565 fIsNewTSS386, pCtx->eip, uNextEip));
2566
2567 /* Update CR2 in case it's a page-fault. */
2568 /** @todo This should probably be done much earlier in IEM/PGM. See
2569 * @bugref{5653#c49}. */
2570 if (fFlags & IEM_XCPT_FLAGS_CR2)
2571 pCtx->cr2 = uCr2;
2572
2573 /*
2574 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2575 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2576 */
2577 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2578 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2579 if (uNewTSSLimit < uNewTSSLimitMin)
2580 {
2581 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2582 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2583 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2584 }
2585
2586 /*
2587 * Check the current TSS limit. The last write to the current TSS during the task
2588 * switch is 2 bytes at offset 0x5C (32-bit) or 1 byte at offset 0x28 (16-bit).
2589 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2590 *
2591 * The AMD docs don't mention anything about limit checks with LTR, which suggests
2592 * you can end up with smaller than "legal" TSS limits.
2593 */
2594 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2595 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2596 if (uCurTSSLimit < uCurTSSLimitMin)
2597 {
2598 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2599 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2600 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2601 }
2602
2603 /*
2604 * Verify that the new TSS can be accessed and map it. Map only the required contents
2605 * and not the entire TSS.
2606 */
2607 void *pvNewTSS;
2608 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2609 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2610 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2611 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2612 * not perform correct translation if this happens. See Intel spec. 7.2.1
2613 * "Task-State Segment" */
2614 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2615 if (rcStrict != VINF_SUCCESS)
2616 {
2617 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2618 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2619 return rcStrict;
2620 }
2621
2622 /*
2623 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2624 */
2625 uint32_t u32EFlags = pCtx->eflags.u32;
2626 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2627 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2628 {
2629 PX86DESC pDescCurTSS;
2630 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2631 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2632 if (rcStrict != VINF_SUCCESS)
2633 {
2634 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2635 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2636 return rcStrict;
2637 }
2638
2639 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2640 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2641 if (rcStrict != VINF_SUCCESS)
2642 {
2643 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2644 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2645 return rcStrict;
2646 }
2647
2648 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2649 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2650 {
2651 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2652 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2653 u32EFlags &= ~X86_EFL_NT;
2654 }
2655 }
2656
2657 /*
2658 * Save the CPU state into the current TSS.
2659 */
2660 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2661 if (GCPtrNewTSS == GCPtrCurTSS)
2662 {
2663 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2664 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2665 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2666 }
2667 if (fIsNewTSS386)
2668 {
2669 /*
2670 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2671 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2672 */
2673 void *pvCurTSS32;
2674 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2675 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2676 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2677 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2678 if (rcStrict != VINF_SUCCESS)
2679 {
2680 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2681 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2682 return rcStrict;
2683 }
2684
2685 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTSS..offCurTSS + cbCurTSS). */
2686 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2687 pCurTSS32->eip = uNextEip;
2688 pCurTSS32->eflags = u32EFlags;
2689 pCurTSS32->eax = pCtx->eax;
2690 pCurTSS32->ecx = pCtx->ecx;
2691 pCurTSS32->edx = pCtx->edx;
2692 pCurTSS32->ebx = pCtx->ebx;
2693 pCurTSS32->esp = pCtx->esp;
2694 pCurTSS32->ebp = pCtx->ebp;
2695 pCurTSS32->esi = pCtx->esi;
2696 pCurTSS32->edi = pCtx->edi;
2697 pCurTSS32->es = pCtx->es.Sel;
2698 pCurTSS32->cs = pCtx->cs.Sel;
2699 pCurTSS32->ss = pCtx->ss.Sel;
2700 pCurTSS32->ds = pCtx->ds.Sel;
2701 pCurTSS32->fs = pCtx->fs.Sel;
2702 pCurTSS32->gs = pCtx->gs.Sel;
2703
2704 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2705 if (rcStrict != VINF_SUCCESS)
2706 {
2707 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2708 VBOXSTRICTRC_VAL(rcStrict)));
2709 return rcStrict;
2710 }
2711 }
2712 else
2713 {
2714 /*
2715 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2716 */
2717 void *pvCurTSS16;
2718 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2719 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2720 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2721 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2722 if (rcStrict != VINF_SUCCESS)
2723 {
2724 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2725 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2726 return rcStrict;
2727 }
2728
2729 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTSS..offCurTSS + cbCurTSS). */
2730 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2731 pCurTSS16->ip = uNextEip;
2732 pCurTSS16->flags = u32EFlags;
2733 pCurTSS16->ax = pCtx->ax;
2734 pCurTSS16->cx = pCtx->cx;
2735 pCurTSS16->dx = pCtx->dx;
2736 pCurTSS16->bx = pCtx->bx;
2737 pCurTSS16->sp = pCtx->sp;
2738 pCurTSS16->bp = pCtx->bp;
2739 pCurTSS16->si = pCtx->si;
2740 pCurTSS16->di = pCtx->di;
2741 pCurTSS16->es = pCtx->es.Sel;
2742 pCurTSS16->cs = pCtx->cs.Sel;
2743 pCurTSS16->ss = pCtx->ss.Sel;
2744 pCurTSS16->ds = pCtx->ds.Sel;
2745
2746 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2747 if (rcStrict != VINF_SUCCESS)
2748 {
2749 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2750 VBOXSTRICTRC_VAL(rcStrict)));
2751 return rcStrict;
2752 }
2753 }
2754
2755 /*
2756 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2757 */
2758 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2759 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2760 {
2761 /* 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
2762 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2763 pNewTSS->selPrev = pCtx->tr.Sel;
2764 }
2765
2766 /*
2767 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2768 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2769 */
2770 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2771 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2772 bool fNewDebugTrap;
2773 if (fIsNewTSS386)
2774 {
2775 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2776 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2777 uNewEip = pNewTSS32->eip;
2778 uNewEflags = pNewTSS32->eflags;
2779 uNewEax = pNewTSS32->eax;
2780 uNewEcx = pNewTSS32->ecx;
2781 uNewEdx = pNewTSS32->edx;
2782 uNewEbx = pNewTSS32->ebx;
2783 uNewEsp = pNewTSS32->esp;
2784 uNewEbp = pNewTSS32->ebp;
2785 uNewEsi = pNewTSS32->esi;
2786 uNewEdi = pNewTSS32->edi;
2787 uNewES = pNewTSS32->es;
2788 uNewCS = pNewTSS32->cs;
2789 uNewSS = pNewTSS32->ss;
2790 uNewDS = pNewTSS32->ds;
2791 uNewFS = pNewTSS32->fs;
2792 uNewGS = pNewTSS32->gs;
2793 uNewLdt = pNewTSS32->selLdt;
2794 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2795 }
2796 else
2797 {
2798 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2799 uNewCr3 = 0;
2800 uNewEip = pNewTSS16->ip;
2801 uNewEflags = pNewTSS16->flags;
2802 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2803 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2804 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2805 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2806 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2807 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2808 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2809 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2810 uNewES = pNewTSS16->es;
2811 uNewCS = pNewTSS16->cs;
2812 uNewSS = pNewTSS16->ss;
2813 uNewDS = pNewTSS16->ds;
2814 uNewFS = 0;
2815 uNewGS = 0;
2816 uNewLdt = pNewTSS16->selLdt;
2817 fNewDebugTrap = false;
2818 }
2819
2820 if (GCPtrNewTSS == GCPtrCurTSS)
2821 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2822 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2823
2824 /*
2825 * We're done accessing the new TSS.
2826 */
2827 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2828 if (rcStrict != VINF_SUCCESS)
2829 {
2830 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2831 return rcStrict;
2832 }
2833
2834 /*
2835 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2836 */
2837 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2838 {
2839 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2840 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2841 if (rcStrict != VINF_SUCCESS)
2842 {
2843 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2844 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2845 return rcStrict;
2846 }
2847
2848 /* Check that the descriptor indicates the new TSS is available (not busy). */
2849 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2850 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2851 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2852
2853 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2854 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2855 if (rcStrict != VINF_SUCCESS)
2856 {
2857 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2858 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2859 return rcStrict;
2860 }
2861 }
2862
2863 /*
2864 * From this point on, we're technically in the new task. We will defer exceptions
2865 * until the completion of the task switch but before executing any instructions in the new task.
2866 */
2867 pCtx->tr.Sel = SelTSS;
2868 pCtx->tr.ValidSel = SelTSS;
2869 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2870 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2871 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2872 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2873 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2874
2875 /* Set the busy bit in TR. */
2876 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2877 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2878 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2879 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2880 {
2881 uNewEflags |= X86_EFL_NT;
2882 }
2883
2884 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2885 pCtx->cr0 |= X86_CR0_TS;
2886 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2887
2888 pCtx->eip = uNewEip;
2889 pCtx->eax = uNewEax;
2890 pCtx->ecx = uNewEcx;
2891 pCtx->edx = uNewEdx;
2892 pCtx->ebx = uNewEbx;
2893 pCtx->esp = uNewEsp;
2894 pCtx->ebp = uNewEbp;
2895 pCtx->esi = uNewEsi;
2896 pCtx->edi = uNewEdi;
2897
2898 uNewEflags &= X86_EFL_LIVE_MASK;
2899 uNewEflags |= X86_EFL_RA1_MASK;
2900 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2901
2902 /*
2903 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2904 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2905 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2906 */
2907 pCtx->es.Sel = uNewES;
2908 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2909 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2910
2911 pCtx->cs.Sel = uNewCS;
2912 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2913 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2914
2915 pCtx->ss.Sel = uNewSS;
2916 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2917 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2918
2919 pCtx->ds.Sel = uNewDS;
2920 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2921 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2922
2923 pCtx->fs.Sel = uNewFS;
2924 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2925 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2926
2927 pCtx->gs.Sel = uNewGS;
2928 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2929 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2930 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2931
2932 pCtx->ldtr.Sel = uNewLdt;
2933 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2934 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2935 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2936
2937 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2938 {
2939 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2940 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2941 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2942 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2943 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2944 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2945 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2946 }
2947
2948 /*
2949 * Switch CR3 for the new task.
2950 */
2951 if ( fIsNewTSS386
2952 && (pCtx->cr0 & X86_CR0_PG))
2953 {
2954 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2955 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2956 {
2957 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2958 AssertRCSuccessReturn(rc, rc);
2959 }
2960 else
2961 pCtx->cr3 = uNewCr3;
2962
2963 /* Inform PGM. */
2964 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2965 {
2966 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2967 AssertRCReturn(rc, rc);
2968 /* ignore informational status codes */
2969 }
2970 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2971 }
2972
2973 /*
2974 * Switch LDTR for the new task.
2975 */
2976 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2977 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2978 else
2979 {
2980 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2981
2982 IEMSELDESC DescNewLdt;
2983 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2984 if (rcStrict != VINF_SUCCESS)
2985 {
2986 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2987 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2988 return rcStrict;
2989 }
2990 if ( !DescNewLdt.Legacy.Gen.u1Present
2991 || DescNewLdt.Legacy.Gen.u1DescType
2992 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2993 {
2994 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2995 uNewLdt, DescNewLdt.Legacy.u));
2996 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2997 }
2998
2999 pCtx->ldtr.ValidSel = uNewLdt;
3000 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3001 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3002 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3003 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3004 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3005 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3006 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
3007 }
3008
3009 IEMSELDESC DescSS;
3010 if (IEM_IS_V86_MODE(pIemCpu))
3011 {
3012 pIemCpu->uCpl = 3;
3013 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
3014 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
3015 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
3016 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
3017 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
3018 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
3019 }
3020 else
3021 {
3022 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3023
3024 /*
3025 * Load the stack segment for the new task.
3026 */
3027 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3028 {
3029 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3030 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3031 }
3032
3033 /* Fetch the descriptor. */
3034 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
3035 if (rcStrict != VINF_SUCCESS)
3036 {
3037 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3038 VBOXSTRICTRC_VAL(rcStrict)));
3039 return rcStrict;
3040 }
3041
3042 /* SS must be a data segment and writable. */
3043 if ( !DescSS.Legacy.Gen.u1DescType
3044 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3045 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3046 {
3047 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3048 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3049 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3050 }
3051
3052 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3053 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3054 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3055 {
3056 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3057 uNewCpl));
3058 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3059 }
3060
3061 /* Is it there? */
3062 if (!DescSS.Legacy.Gen.u1Present)
3063 {
3064 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3065 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3066 }
3067
3068 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3069 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3070
3071 /* Set the accessed bit before committing the result into SS. */
3072 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3073 {
3074 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3075 if (rcStrict != VINF_SUCCESS)
3076 return rcStrict;
3077 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3078 }
3079
3080 /* Commit SS. */
3081 pCtx->ss.Sel = uNewSS;
3082 pCtx->ss.ValidSel = uNewSS;
3083 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3084 pCtx->ss.u32Limit = cbLimit;
3085 pCtx->ss.u64Base = u64Base;
3086 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3087 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3088
3089 /* CPL has changed, update IEM before loading rest of segments. */
3090 pIemCpu->uCpl = uNewCpl;
3091
3092 /*
3093 * Load the data segments for the new task.
3094 */
3095 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3096 if (rcStrict != VINF_SUCCESS)
3097 return rcStrict;
3098 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3099 if (rcStrict != VINF_SUCCESS)
3100 return rcStrict;
3101 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3102 if (rcStrict != VINF_SUCCESS)
3103 return rcStrict;
3104 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3105 if (rcStrict != VINF_SUCCESS)
3106 return rcStrict;
3107
3108 /*
3109 * Load the code segment for the new task.
3110 */
3111 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3112 {
3113 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3114 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3115 }
3116
3117 /* Fetch the descriptor. */
3118 IEMSELDESC DescCS;
3119 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3120 if (rcStrict != VINF_SUCCESS)
3121 {
3122 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3123 return rcStrict;
3124 }
3125
3126 /* CS must be a code segment. */
3127 if ( !DescCS.Legacy.Gen.u1DescType
3128 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3129 {
3130 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3131 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3132 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3133 }
3134
3135 /* For conforming CS, DPL must be less than or equal to the RPL. */
3136 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3137 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3138 {
3139 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3140 DescCS.Legacy.Gen.u2Dpl));
3141 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3142 }
3143
3144 /* For non-conforming CS, DPL must match RPL. */
3145 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3146 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3147 {
3148 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3149 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3150 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3151 }
3152
3153 /* Is it there? */
3154 if (!DescCS.Legacy.Gen.u1Present)
3155 {
3156 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3157 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3158 }
3159
3160 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3161 u64Base = X86DESC_BASE(&DescCS.Legacy);
3162
3163 /* Set the accessed bit before committing the result into CS. */
3164 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3165 {
3166 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3167 if (rcStrict != VINF_SUCCESS)
3168 return rcStrict;
3169 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3170 }
3171
3172 /* Commit CS. */
3173 pCtx->cs.Sel = uNewCS;
3174 pCtx->cs.ValidSel = uNewCS;
3175 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3176 pCtx->cs.u32Limit = cbLimit;
3177 pCtx->cs.u64Base = u64Base;
3178 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3179 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3180 }
3181
3182 /** @todo Debug trap. */
3183 if (fIsNewTSS386 && fNewDebugTrap)
3184 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3185
3186 /*
3187 * Construct the error code masks based on what caused this task switch.
3188 * See Intel Instruction reference for INT.
3189 */
3190 uint16_t uExt;
3191 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3192 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3193 {
3194 uExt = 1;
3195 }
3196 else
3197 uExt = 0;
3198
3199 /*
3200 * Push any error code on to the new stack.
3201 */
3202 if (fFlags & IEM_XCPT_FLAGS_ERR)
3203 {
3204 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3205 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3206 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3207
3208 /* Check that there is sufficient space on the stack. */
3209 /** @todo Factor out segment limit checking for normal/expand down segments
3210 * into a separate function. */
3211 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3212 {
3213 if ( pCtx->esp - 1 > cbLimitSS
3214 || pCtx->esp < cbStackFrame)
3215 {
3216 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3217 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3218 cbStackFrame));
3219 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3220 }
3221 }
3222 else
3223 {
3224 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3225 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3226 {
3227 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3228 cbStackFrame));
3229 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3230 }
3231 }
3232
3233
3234 if (fIsNewTSS386)
3235 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3236 else
3237 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3238 if (rcStrict != VINF_SUCCESS)
3239 {
3240 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3241 VBOXSTRICTRC_VAL(rcStrict)));
3242 return rcStrict;
3243 }
3244 }
3245
3246 /* Check the new EIP against the new CS limit. */
3247 if (pCtx->eip > pCtx->cs.u32Limit)
3248 {
3249 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3250 pCtx->eip, pCtx->cs.u32Limit));
3251 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3252 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3253 }
3254
3255 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3256 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3257}
3258
3259
3260/**
3261 * Implements exceptions and interrupts for protected mode.
3262 *
3263 * @returns VBox strict status code.
3264 * @param pIemCpu The IEM per CPU instance data.
3265 * @param pCtx The CPU context.
3266 * @param cbInstr The number of bytes to offset rIP by in the return
3267 * address.
3268 * @param u8Vector The interrupt / exception vector number.
3269 * @param fFlags The flags.
3270 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3271 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3272 */
3273IEM_STATIC VBOXSTRICTRC
3274iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3275 PCPUMCTX pCtx,
3276 uint8_t cbInstr,
3277 uint8_t u8Vector,
3278 uint32_t fFlags,
3279 uint16_t uErr,
3280 uint64_t uCr2)
3281{
3282 /*
3283 * Read the IDT entry.
3284 */
3285 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3286 {
3287 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3288 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3289 }
3290 X86DESC Idte;
3291 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3292 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3293 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3294 return rcStrict;
3295 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3296 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3297 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3298
3299 /*
3300 * Check the descriptor type, DPL and such.
3301 * ASSUMES this is done in the same order as described for call-gate calls.
3302 */
3303 if (Idte.Gate.u1DescType)
3304 {
3305 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3306 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3307 }
3308 bool fTaskGate = false;
3309 uint8_t f32BitGate = true;
3310 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
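/* These flags are always cleared in EFLAGS when the handler is entered; interrupt gates additionally clear IF (added in the switch below). */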
3311 switch (Idte.Gate.u4Type)
3312 {
3313 case X86_SEL_TYPE_SYS_UNDEFINED:
3314 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3315 case X86_SEL_TYPE_SYS_LDT:
3316 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3317 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3318 case X86_SEL_TYPE_SYS_UNDEFINED2:
3319 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3320 case X86_SEL_TYPE_SYS_UNDEFINED3:
3321 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3322 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3323 case X86_SEL_TYPE_SYS_UNDEFINED4:
3324 {
3325 /** @todo check what actually happens when the type is wrong...
3326 * esp. call gates. */
3327 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3328 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3329 }
3330
3331 case X86_SEL_TYPE_SYS_286_INT_GATE:
3332 f32BitGate = false;
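/* fall thru */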
3333 case X86_SEL_TYPE_SYS_386_INT_GATE:
3334 fEflToClear |= X86_EFL_IF;
3335 break;
3336
3337 case X86_SEL_TYPE_SYS_TASK_GATE:
3338 fTaskGate = true;
3339#ifndef IEM_IMPLEMENTS_TASKSWITCH
3340 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3341#endif
3342 break;
3343
3344 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3345 f32BitGate = false;
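/* fall thru */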
3346 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3347 break;
3348
3349 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3350 }
3351
3352 /* Check DPL against CPL if applicable. */
3353 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3354 {
3355 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3356 {
3357 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3358 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3359 }
3360 }
3361
3362 /* Is it there? */
3363 if (!Idte.Gate.u1Present)
3364 {
3365 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3366 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3367 }
3368
3369 /* Is it a task-gate? */
3370 if (fTaskGate)
3371 {
3372 /*
3373 * Construct the error code masks based on what caused this task switch.
3374 * See Intel Instruction reference for INT.
3375 */
3376 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3377 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3378 RTSEL SelTSS = Idte.Gate.u16Sel;
3379
3380 /*
3381 * Fetch the TSS descriptor in the GDT.
3382 */
3383 IEMSELDESC DescTSS;
3384 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3385 if (rcStrict != VINF_SUCCESS)
3386 {
3387 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3388 VBOXSTRICTRC_VAL(rcStrict)));
3389 return rcStrict;
3390 }
3391
3392 /* The TSS descriptor must be a system segment and be available (not busy). */
3393 if ( DescTSS.Legacy.Gen.u1DescType
3394 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3395 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3396 {
3397 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3398 u8Vector, SelTSS, DescTSS.Legacy.au64));
3399 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3400 }
3401
3402 /* The TSS must be present. */
3403 if (!DescTSS.Legacy.Gen.u1Present)
3404 {
3405 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3406 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3407 }
3408
3409 /* Do the actual task switch. */
3410 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3411 }
3412
3413 /* A null CS is bad. */
3414 RTSEL NewCS = Idte.Gate.u16Sel;
3415 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3416 {
3417 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3418 return iemRaiseGeneralProtectionFault0(pIemCpu);
3419 }
3420
3421 /* Fetch the descriptor for the new CS. */
3422 IEMSELDESC DescCS;
3423 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3424 if (rcStrict != VINF_SUCCESS)
3425 {
3426 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3427 return rcStrict;
3428 }
3429
3430 /* Must be a code segment. */
3431 if (!DescCS.Legacy.Gen.u1DescType)
3432 {
3433 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3434 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3435 }
3436 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3437 {
3438 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3439 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3440 }
3441
3442 /* Don't allow lowering the privilege level. */
3443 /** @todo Does the lowering of privileges apply to software interrupts
3444 * only? This has bearings on the more-privileged or
3445 * same-privilege stack behavior further down. A testcase would
3446 * be nice. */
3447 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3448 {
3449 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3450 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3451 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3452 }
3453
3454 /* Make sure the selector is present. */
3455 if (!DescCS.Legacy.Gen.u1Present)
3456 {
3457 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3458 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3459 }
3460
3461 /* Check the new EIP against the new CS limit. */
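/* 286 gates only supply a 16-bit offset; 386 gates add the high word. */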
3462 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3463 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3464 ? Idte.Gate.u16OffsetLow
3465 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3466 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3467 if (uNewEip > cbLimitCS)
3468 {
3469 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3470 u8Vector, uNewEip, cbLimitCS, NewCS));
3471 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3472 }
3473
3474 /* Calc the flag image to push. */
3475 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3476 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3477 fEfl &= ~X86_EFL_RF;
3478 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3479 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3480
3481 /* From V8086 mode only go to CPL 0. */
3482 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3483 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3484 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3485 {
3486 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3487 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3488 }
3489
3490 /*
3491 * If the privilege level changes, we need to get a new stack from the TSS.
3492 * This in turns means validating the new SS and ESP...
3493 */
3494 if (uNewCpl != pIemCpu->uCpl)
3495 {
3496 RTSEL NewSS;
3497 uint32_t uNewEsp;
3498 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3499 if (rcStrict != VINF_SUCCESS)
3500 return rcStrict;
3501
3502 IEMSELDESC DescSS;
3503 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3504 if (rcStrict != VINF_SUCCESS)
3505 return rcStrict;
3506
3507 /* Check that there is sufficient space for the stack frame. */
3508 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
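/* 5 entries (EIP, CS, EFLAGS, ESP, SS) plus an optional error code; a V8086 frame also saves ES, DS, FS and GS. 16-bit gates push words, 32-bit gates push dwords, hence the shift by f32BitGate. */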
3509 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3510 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3511 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3512
3513 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3514 {
3515 if ( uNewEsp - 1 > cbLimitSS
3516 || uNewEsp < cbStackFrame)
3517 {
3518 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3519 u8Vector, NewSS, uNewEsp, cbStackFrame));
3520 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3521 }
3522 }
3523 else
3524 {
3525 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3526 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3527 {
3528 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3529 u8Vector, NewSS, uNewEsp, cbStackFrame));
3530 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3531 }
3532 }
3533
3534 /*
3535 * Start making changes.
3536 */
3537
3538 /* Create the stack frame. */
3539 RTPTRUNION uStackFrame;
3540 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3541 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3542 if (rcStrict != VINF_SUCCESS)
3543 return rcStrict;
3544 void * const pvStackFrame = uStackFrame.pv;
3545 if (f32BitGate)
3546 {
3547 if (fFlags & IEM_XCPT_FLAGS_ERR)
3548 *uStackFrame.pu32++ = uErr;
3549 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3550 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3551 uStackFrame.pu32[2] = fEfl;
3552 uStackFrame.pu32[3] = pCtx->esp;
3553 uStackFrame.pu32[4] = pCtx->ss.Sel;
3554 if (fEfl & X86_EFL_VM)
3555 {
3556 uStackFrame.pu32[1] = pCtx->cs.Sel;
3557 uStackFrame.pu32[5] = pCtx->es.Sel;
3558 uStackFrame.pu32[6] = pCtx->ds.Sel;
3559 uStackFrame.pu32[7] = pCtx->fs.Sel;
3560 uStackFrame.pu32[8] = pCtx->gs.Sel;
3561 }
3562 }
3563 else
3564 {
3565 if (fFlags & IEM_XCPT_FLAGS_ERR)
3566 *uStackFrame.pu16++ = uErr;
3567 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3568 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3569 uStackFrame.pu16[2] = fEfl;
3570 uStackFrame.pu16[3] = pCtx->sp;
3571 uStackFrame.pu16[4] = pCtx->ss.Sel;
3572 if (fEfl & X86_EFL_VM)
3573 {
3574 uStackFrame.pu16[1] = pCtx->cs.Sel;
3575 uStackFrame.pu16[5] = pCtx->es.Sel;
3576 uStackFrame.pu16[6] = pCtx->ds.Sel;
3577 uStackFrame.pu16[7] = pCtx->fs.Sel;
3578 uStackFrame.pu16[8] = pCtx->gs.Sel;
3579 }
3580 }
3581 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3582 if (rcStrict != VINF_SUCCESS)
3583 return rcStrict;
3584
3585 /* Mark the selectors 'accessed' (hope this is the correct time). */
3586 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3587 * after pushing the stack frame? (Write protect the gdt + stack to
3588 * find out.) */
3589 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3590 {
3591 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3592 if (rcStrict != VINF_SUCCESS)
3593 return rcStrict;
3594 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3595 }
3596
3597 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3598 {
3599 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3600 if (rcStrict != VINF_SUCCESS)
3601 return rcStrict;
3602 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3603 }
3604
3605 /*
3606 * Start committing the register changes (joins with the DPL=CPL branch).
3607 */
3608 pCtx->ss.Sel = NewSS;
3609 pCtx->ss.ValidSel = NewSS;
3610 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3611 pCtx->ss.u32Limit = cbLimitSS;
3612 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3613 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3614 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3615 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3616 * SP is loaded).
3617 * Need to check the other combinations too:
3618 * - 16-bit TSS, 32-bit handler
3619 * - 32-bit TSS, 16-bit handler */
3620 if (!pCtx->ss.Attr.n.u1DefBig)
3621 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
3622 else
3623 pCtx->rsp = uNewEsp - cbStackFrame;
3624 pIemCpu->uCpl = uNewCpl;
3625
3626 if (fEfl & X86_EFL_VM)
3627 {
3628 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3629 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3630 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3631 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3632 }
3633 }
3634 /*
3635 * Same privilege, no stack change and smaller stack frame.
3636 */
3637 else
3638 {
3639 uint64_t uNewRsp;
3640 RTPTRUNION uStackFrame;
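/* 3 entries (EIP, CS, EFLAGS) plus an optional error code; words for 16-bit gates, dwords for 32-bit ones. */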
3641 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3642 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3643 if (rcStrict != VINF_SUCCESS)
3644 return rcStrict;
3645 void * const pvStackFrame = uStackFrame.pv;
3646
3647 if (f32BitGate)
3648 {
3649 if (fFlags & IEM_XCPT_FLAGS_ERR)
3650 *uStackFrame.pu32++ = uErr;
3651 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3652 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3653 uStackFrame.pu32[2] = fEfl;
3654 }
3655 else
3656 {
3657 if (fFlags & IEM_XCPT_FLAGS_ERR)
3658 *uStackFrame.pu16++ = uErr;
3659 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3660 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3661 uStackFrame.pu16[2] = fEfl;
3662 }
3663 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3664 if (rcStrict != VINF_SUCCESS)
3665 return rcStrict;
3666
3667 /* Mark the CS selector as 'accessed'. */
3668 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3669 {
3670 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3671 if (rcStrict != VINF_SUCCESS)
3672 return rcStrict;
3673 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3674 }
3675
3676 /*
3677 * Start committing the register changes (joins with the other branch).
3678 */
3679 pCtx->rsp = uNewRsp;
3680 }
3681
3682 /* ... register committing continues. */
3683 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3684 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3685 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3686 pCtx->cs.u32Limit = cbLimitCS;
3687 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3688 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3689
3690 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3691 fEfl &= ~fEflToClear;
3692 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3693
3694 if (fFlags & IEM_XCPT_FLAGS_CR2)
3695 pCtx->cr2 = uCr2;
3696
3697 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3698 iemRaiseXcptAdjustState(pCtx, u8Vector);
3699
3700 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3701}
3702
3703
3704/**
3705 * Implements exceptions and interrupts for long mode.
3706 *
3707 * @returns VBox strict status code.
3708 * @param pIemCpu The IEM per CPU instance data.
3709 * @param pCtx The CPU context.
3710 * @param cbInstr The number of bytes to offset rIP by in the return
3711 * address.
3712 * @param u8Vector The interrupt / exception vector number.
3713 * @param fFlags The flags.
3714 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3715 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3716 */
3717IEM_STATIC VBOXSTRICTRC
3718iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3719 PCPUMCTX pCtx,
3720 uint8_t cbInstr,
3721 uint8_t u8Vector,
3722 uint32_t fFlags,
3723 uint16_t uErr,
3724 uint64_t uCr2)
3725{
3726 /*
3727 * Read the IDT entry.
3728 */
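/* Each IDT entry is 16 bytes in long mode, hence the shift by 4. */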
3729 uint16_t offIdt = (uint16_t)u8Vector << 4;
3730 if (pCtx->idtr.cbIdt < offIdt + 7)
3731 {
3732 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3733 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3734 }
3735 X86DESC64 Idte;
3736 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3737 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3738 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3739 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3740 return rcStrict;
3741 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3742 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3743 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3744
3745 /*
3746 * Check the descriptor type, DPL and such.
3747 * ASSUMES this is done in the same order as described for call-gate calls.
3748 */
3749 if (Idte.Gate.u1DescType)
3750 {
3751 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3752 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3753 }
3754 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3755 switch (Idte.Gate.u4Type)
3756 {
3757 case AMD64_SEL_TYPE_SYS_INT_GATE:
3758 fEflToClear |= X86_EFL_IF;
3759 break;
3760 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3761 break;
3762
3763 default:
3764 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3765 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3766 }
3767
3768 /* Check DPL against CPL if applicable. */
3769 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3770 {
3771 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3772 {
3773 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3774 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3775 }
3776 }
3777
3778 /* Is it there? */
3779 if (!Idte.Gate.u1Present)
3780 {
3781 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3782 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3783 }
3784
3785 /* A null CS is bad. */
3786 RTSEL NewCS = Idte.Gate.u16Sel;
3787 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3788 {
3789 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3790 return iemRaiseGeneralProtectionFault0(pIemCpu);
3791 }
3792
3793 /* Fetch the descriptor for the new CS. */
3794 IEMSELDESC DescCS;
3795 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3796 if (rcStrict != VINF_SUCCESS)
3797 {
3798 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3799 return rcStrict;
3800 }
3801
3802 /* Must be a 64-bit code segment. */
3803 if (!DescCS.Long.Gen.u1DescType)
3804 {
3805 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3806 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3807 }
3808 if ( !DescCS.Long.Gen.u1Long
3809 || DescCS.Long.Gen.u1DefBig
3810 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3811 {
3812 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3813 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3814 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3815 }
3816
3817 /* Don't allow lowering the privilege level. For non-conforming CS
3818 selectors, the CS.DPL sets the privilege level the trap/interrupt
3819 handler runs at. For conforming CS selectors, the CPL remains
3820 unchanged, but the CS.DPL must be <= CPL. */
3821 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3822 * when CPU in Ring-0. Result \#GP? */
3823 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3824 {
3825 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3826 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3827 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3828 }
3829
3830
3831 /* Make sure the selector is present. */
3832 if (!DescCS.Legacy.Gen.u1Present)
3833 {
3834 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3835 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3836 }
3837
3838 /* Check that the new RIP is canonical. */
3839 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3840 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3841 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3842 if (!IEM_IS_CANONICAL(uNewRip))
3843 {
3844 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3845 return iemRaiseGeneralProtectionFault0(pIemCpu);
3846 }
3847
3848 /*
3849 * If the privilege level changes or if the IST isn't zero, we need to get
3850 * a new stack from the TSS.
3851 */
3852 uint64_t uNewRsp;
3853 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3854 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3855 if ( uNewCpl != pIemCpu->uCpl
3856 || Idte.Gate.u3IST != 0)
3857 {
3858 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3859 if (rcStrict != VINF_SUCCESS)
3860 return rcStrict;
3861 }
3862 else
3863 uNewRsp = pCtx->rsp;
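/* In long mode the stack is aligned on a 16-byte boundary before the frame is pushed. */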
3864 uNewRsp &= ~(uint64_t)0xf;
3865
3866 /*
3867 * Calc the flag image to push.
3868 */
3869 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3870 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3871 fEfl &= ~X86_EFL_RF;
3872 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3873 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3874
3875 /*
3876 * Start making changes.
3877 */
3878
3879 /* Create the stack frame. */
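/* SS, RSP, RFLAGS, CS and RIP are always pushed in long mode; the error code, when present, goes on top of them. */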
3880 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3881 RTPTRUNION uStackFrame;
3882 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3883 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3884 if (rcStrict != VINF_SUCCESS)
3885 return rcStrict;
3886 void * const pvStackFrame = uStackFrame.pv;
3887
3888 if (fFlags & IEM_XCPT_FLAGS_ERR)
3889 *uStackFrame.pu64++ = uErr;
3890 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3891 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3892 uStackFrame.pu64[2] = fEfl;
3893 uStackFrame.pu64[3] = pCtx->rsp;
3894 uStackFrame.pu64[4] = pCtx->ss.Sel;
3895 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3896 if (rcStrict != VINF_SUCCESS)
3897 return rcStrict;
3898
3899 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3900 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3901 * after pushing the stack frame? (Write protect the gdt + stack to
3902 * find out.) */
3903 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3904 {
3905 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3906 if (rcStrict != VINF_SUCCESS)
3907 return rcStrict;
3908 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3909 }
3910
3911 /*
3912 * Start committing the register changes.
3913 */
3914 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3915 * hidden registers when interrupting 32-bit or 16-bit code! */
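/* On a privilege change in long mode SS is loaded with a NULL selector carrying the new CPL in its RPL bits; the stack itself comes from the TSS/IST. */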
3916 if (uNewCpl != pIemCpu->uCpl)
3917 {
3918 pCtx->ss.Sel = 0 | uNewCpl;
3919 pCtx->ss.ValidSel = 0 | uNewCpl;
3920 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3921 pCtx->ss.u32Limit = UINT32_MAX;
3922 pCtx->ss.u64Base = 0;
3923 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3924 }
3925 pCtx->rsp = uNewRsp - cbStackFrame;
3926 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3927 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3928 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3929 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3930 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3931 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3932 pCtx->rip = uNewRip;
3933 pIemCpu->uCpl = uNewCpl;
3934
3935 fEfl &= ~fEflToClear;
3936 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3937
3938 if (fFlags & IEM_XCPT_FLAGS_CR2)
3939 pCtx->cr2 = uCr2;
3940
3941 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3942 iemRaiseXcptAdjustState(pCtx, u8Vector);
3943
3944 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3945}
3946
3947
3948/**
3949 * Implements exceptions and interrupts.
3950 *
3951 * All exceptions and interrupts go through this function!
3952 *
3953 * @returns VBox strict status code.
3954 * @param pIemCpu The IEM per CPU instance data.
3955 * @param cbInstr The number of bytes to offset rIP by in the return
3956 * address.
3957 * @param u8Vector The interrupt / exception vector number.
3958 * @param fFlags The flags.
3959 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3960 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3961 */
3962DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3963iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3964 uint8_t cbInstr,
3965 uint8_t u8Vector,
3966 uint32_t fFlags,
3967 uint16_t uErr,
3968 uint64_t uCr2)
3969{
3970 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3971#ifdef IN_RING0
3972 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3973 AssertRCReturn(rc, rc);
3974#endif
3975
3976 /*
3977 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3978 */
3979 if ( pCtx->eflags.Bits.u1VM
3980 && pCtx->eflags.Bits.u2IOPL != 3
3981 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3982 && (pCtx->cr0 & X86_CR0_PE) )
3983 {
3984 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3985 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3986 u8Vector = X86_XCPT_GP;
3987 uErr = 0;
3988 }
3989#ifdef DBGFTRACE_ENABLED
3990 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3991 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3992 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3993#endif
3994
3995 /*
3996 * Do recursion accounting.
3997 */
3998 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3999 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
4000 if (pIemCpu->cXcptRecursions == 0)
4001 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4002 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
4003 else
4004 {
4005 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4006 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
4007
4008 /** @todo double and triple faults. */
4009 if (pIemCpu->cXcptRecursions >= 3)
4010 {
4011#ifdef DEBUG_bird
4012 AssertFailed();
4013#endif
4014 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4015 }
4016
4017 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4018 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4019 {
4020 ....
4021 } */
4022 }
4023 pIemCpu->cXcptRecursions++;
4024 pIemCpu->uCurXcpt = u8Vector;
4025 pIemCpu->fCurXcpt = fFlags;
4026
4027 /*
4028 * Extensive logging.
4029 */
4030#if defined(LOG_ENABLED) && defined(IN_RING3)
4031 if (LogIs3Enabled())
4032 {
4033 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4034 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4035 char szRegs[4096];
4036 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4037 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4038 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4039 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4040 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4041 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4042 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4043 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4044 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4045 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4046 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4047 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4048 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4049 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4050 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4051 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4052 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4053 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4054 " efer=%016VR{efer}\n"
4055 " pat=%016VR{pat}\n"
4056 " sf_mask=%016VR{sf_mask}\n"
4057 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4058 " lstar=%016VR{lstar}\n"
4059 " star=%016VR{star} cstar=%016VR{cstar}\n"
4060 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4061 );
4062
4063 char szInstr[256];
4064 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4065 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4066 szInstr, sizeof(szInstr), NULL);
4067 Log3(("%s%s\n", szRegs, szInstr));
4068 }
4069#endif /* LOG_ENABLED */
4070
4071 /*
4072 * Call the mode specific worker function.
4073 */
4074 VBOXSTRICTRC rcStrict;
4075 if (!(pCtx->cr0 & X86_CR0_PE))
4076 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4077 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4078 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4079 else
4080 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4081
4082 /*
4083 * Unwind.
4084 */
4085 pIemCpu->cXcptRecursions--;
4086 pIemCpu->uCurXcpt = uPrevXcpt;
4087 pIemCpu->fCurXcpt = fPrevXcpt;
4088 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4089 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4090 return rcStrict;
4091}
4092
4093
4094/** \#DE - 00. */
4095DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4096{
4097 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4098}
4099
4100
4101/** \#DB - 01.
4102 * @note This automatically clears DR7.GD. */
4103DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4104{
4105 /** @todo set/clear RF. */
4106 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4107 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4108}
4109
4110
4111/** \#UD - 06. */
4112DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4113{
4114 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4115}
4116
4117
4118/** \#NM - 07. */
4119DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4120{
4121 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4122}
4123
4124
4125/** \#TS(err) - 0a. */
4126DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4127{
4128 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4129}
4130
4131
4132/** \#TS(tr) - 0a. */
4133DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4134{
4135 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4136 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4137}
4138
4139
4140/** \#TS(0) - 0a. */
4141DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4142{
4143 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4144 0, 0);
4145}
4146
4147
4148/** \#TS(sel) - 0a. */
4149DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4150{
4151 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4152 uSel & X86_SEL_MASK_OFF_RPL, 0);
4153}
4154
4155
4156/** \#NP(err) - 0b. */
4157DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4158{
4159 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4160}
4161
4162
4163/** \#NP(seg) - 0b. */
4164DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4165{
4166 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4167 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4168}
4169
4170
4171/** \#NP(sel) - 0b. */
4172DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4173{
4174 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4175 uSel & ~X86_SEL_RPL, 0);
4176}
4177
4178
4179/** \#SS(sel) - 0c. */
4180DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4181{
4182 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4183 uSel & ~X86_SEL_RPL, 0);
4184}
4185
4186
4187/** \#SS(err) - 0c. */
4188DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4189{
4190 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4191}
4192
4193
4194/** \#GP(n) - 0d. */
4195DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4196{
4197 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4198}
4199
4200
4201/** \#GP(0) - 0d. */
4202DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4203{
4204 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4205}
4206
4207
4208/** \#GP(sel) - 0d. */
4209DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4210{
4211 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4212 Sel & ~X86_SEL_RPL, 0);
4213}
4214
4215
4216/** \#GP(0) - 0d. */
4217DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4218{
4219 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4220}
4221
4222
4223/** \#GP(sel) - 0d. */
4224DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4225{
4226 NOREF(iSegReg); NOREF(fAccess);
4227 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4228 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4229}
4230
4231
4232/** \#GP(sel) - 0d. */
4233DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4234{
4235 NOREF(Sel);
4236 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4237}
4238
4239
4240/** \#GP(sel) - 0d. */
4241DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4242{
4243 NOREF(iSegReg); NOREF(fAccess);
4244 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4245}
4246
4247
4248/** \#PF(n) - 0e. */
4249DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4250{
4251 uint16_t uErr;
4252 switch (rc)
4253 {
4254 case VERR_PAGE_NOT_PRESENT:
4255 case VERR_PAGE_TABLE_NOT_PRESENT:
4256 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4257 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4258 uErr = 0;
4259 break;
4260
4261 default:
4262 AssertMsgFailed(("%Rrc\n", rc));
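/* fall thru - treat any unexpected status as a protection violation. */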
4263 case VERR_ACCESS_DENIED:
4264 uErr = X86_TRAP_PF_P;
4265 break;
4266
4267 /** @todo reserved */
4268 }
4269
4270 if (pIemCpu->uCpl == 3)
4271 uErr |= X86_TRAP_PF_US;
4272
4273 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4274 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4275 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4276 uErr |= X86_TRAP_PF_ID;
4277
4278#if 0 /* This is so much non-sense, really. Why was it done like that? */
4279 /* Note! RW access callers reporting a WRITE protection fault, will clear
4280 the READ flag before calling. So, read-modify-write accesses (RW)
4281 can safely be reported as READ faults. */
4282 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4283 uErr |= X86_TRAP_PF_RW;
4284#else
4285 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4286 {
4287 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4288 uErr |= X86_TRAP_PF_RW;
4289 }
4290#endif
4291
4292 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4293 uErr, GCPtrWhere);
4294}
4295
4296
4297/** \#MF(0) - 10. */
4298DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4299{
4300 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4301}
4302
4303
4304/** \#AC(0) - 11. */
4305DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4306{
4307 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4308}
4309
4310
4311/**
4312 * Macro for calling iemCImplRaiseDivideError().
4313 *
4314 * This enables us to add/remove arguments and force different levels of
4315 * inlining as we wish.
4316 *
4317 * @return Strict VBox status code.
4318 */
4319#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4320IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4321{
4322 NOREF(cbInstr);
4323 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4324}
4325
4326
4327/**
4328 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4329 *
4330 * This enables us to add/remove arguments and force different levels of
4331 * inlining as we wish.
4332 *
4333 * @return Strict VBox status code.
4334 */
4335#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4336IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4337{
4338 NOREF(cbInstr);
4339 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4340}
4341
4342
4343/**
4344 * Macro for calling iemCImplRaiseInvalidOpcode().
4345 *
4346 * This enables us to add/remove arguments and force different levels of
4347 * inlining as we wish.
4348 *
4349 * @return Strict VBox status code.
4350 */
4351#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4352IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4353{
4354 NOREF(cbInstr);
4355 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4356}
4357
4358
4359/** @} */
4360
4361
4362/*
4363 *
4364 * Helper routines.
4365 * Helper routines.
4366 * Helper routines.
4367 *
4368 */
4369
4370/**
4371 * Recalculates the effective operand size.
4372 *
4373 * @param pIemCpu The IEM state.
4374 */
4375IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4376{
4377 switch (pIemCpu->enmCpuMode)
4378 {
4379 case IEMMODE_16BIT:
4380 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4381 break;
4382 case IEMMODE_32BIT:
4383 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4384 break;
4385 case IEMMODE_64BIT:
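/* REX.W takes precedence over the 0x66 operand-size prefix in 64-bit mode. */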
4386 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4387 {
4388 case 0:
4389 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4390 break;
4391 case IEM_OP_PRF_SIZE_OP:
4392 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4393 break;
4394 case IEM_OP_PRF_SIZE_REX_W:
4395 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4396 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4397 break;
4398 }
4399 break;
4400 default:
4401 AssertFailed();
4402 }
4403}
4404
4405
4406/**
4407 * Sets the default operand size to 64-bit and recalculates the effective
4408 * operand size.
4409 *
4410 * @param pIemCpu The IEM state.
4411 */
4412IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4413{
4414 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4415 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
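/* Only a lone 0x66 prefix (without REX.W) drops the effective operand size to 16-bit here. */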
4416 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4417 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4418 else
4419 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4420}
4421
4422
4423/*
4424 *
4425 * Common opcode decoders.
4426 * Common opcode decoders.
4427 * Common opcode decoders.
4428 *
4429 */
4430//#include <iprt/mem.h>
4431
4432/**
4433 * Used to add extra details about a stub case.
4434 * @param pIemCpu The IEM per CPU state.
4435 */
4436IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4437{
4438#if defined(LOG_ENABLED) && defined(IN_RING3)
4439 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4440 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4441 char szRegs[4096];
4442 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4443 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4444 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4445 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4446 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4447 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4448 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4449 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4450 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4451 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4452 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4453 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4454 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4455 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4456 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4457 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4458 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4459 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4460 " efer=%016VR{efer}\n"
4461 " pat=%016VR{pat}\n"
4462 " sf_mask=%016VR{sf_mask}\n"
4463 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4464 " lstar=%016VR{lstar}\n"
4465 " star=%016VR{star} cstar=%016VR{cstar}\n"
4466 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4467 );
4468
4469 char szInstr[256];
4470 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4471 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4472 szInstr, sizeof(szInstr), NULL);
4473
4474 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4475#else
4476 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4477#endif
4478}
4479
4480/**
4481 * Complains about a stub.
4482 *
4483 * Providing two versions of this macro, one for daily use and one for use when
4484 * working on IEM.
4485 */
4486#if 0
4487# define IEMOP_BITCH_ABOUT_STUB() \
4488 do { \
4489 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4490 iemOpStubMsg2(pIemCpu); \
4491 RTAssertPanic(); \
4492 } while (0)
4493#else
4494# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4495#endif
4496
4497/** Stubs an opcode. */
4498#define FNIEMOP_STUB(a_Name) \
4499 FNIEMOP_DEF(a_Name) \
4500 { \
4501 IEMOP_BITCH_ABOUT_STUB(); \
4502 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4503 } \
4504 typedef int ignore_semicolon
4505
4506/** Stubs an opcode. */
4507#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4508 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4509 { \
4510 IEMOP_BITCH_ABOUT_STUB(); \
4511 NOREF(a_Name0); \
4512 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4513 } \
4514 typedef int ignore_semicolon
4515
4516/** Stubs an opcode which currently should raise \#UD. */
4517#define FNIEMOP_UD_STUB(a_Name) \
4518 FNIEMOP_DEF(a_Name) \
4519 { \
4520 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4521 return IEMOP_RAISE_INVALID_OPCODE(); \
4522 } \
4523 typedef int ignore_semicolon
4524
4525/** Stubs an opcode which currently should raise \#UD. */
4526#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4527 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4528 { \
4529 NOREF(a_Name0); \
4530 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4531 return IEMOP_RAISE_INVALID_OPCODE(); \
4532 } \
4533 typedef int ignore_semicolon
4534
4535
4536
4537/** @name Register Access.
4538 * @{
4539 */
4540
4541/**
4542 * Gets a reference (pointer) to the specified hidden segment register.
4543 *
4544 * @returns Hidden register reference.
4545 * @param pIemCpu The per CPU data.
4546 * @param iSegReg The segment register.
4547 */
4548IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4549{
4550 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4551 PCPUMSELREG pSReg;
4552 switch (iSegReg)
4553 {
4554 case X86_SREG_ES: pSReg = &pCtx->es; break;
4555 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4556 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4557 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4558 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4559 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4560 default:
4561 AssertFailedReturn(NULL);
4562 }
4563#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4564 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4565 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4566#else
4567 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4568#endif
4569 return pSReg;
4570}
4571
4572
4573/**
4574 * Gets a reference (pointer) to the specified segment register (the selector
4575 * value).
4576 *
4577 * @returns Pointer to the selector variable.
4578 * @param pIemCpu The per CPU data.
4579 * @param iSegReg The segment register.
4580 */
4581IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4582{
4583 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4584 switch (iSegReg)
4585 {
4586 case X86_SREG_ES: return &pCtx->es.Sel;
4587 case X86_SREG_CS: return &pCtx->cs.Sel;
4588 case X86_SREG_SS: return &pCtx->ss.Sel;
4589 case X86_SREG_DS: return &pCtx->ds.Sel;
4590 case X86_SREG_FS: return &pCtx->fs.Sel;
4591 case X86_SREG_GS: return &pCtx->gs.Sel;
4592 }
4593 AssertFailedReturn(NULL);
4594}
4595
4596
4597/**
4598 * Fetches the selector value of a segment register.
4599 *
4600 * @returns The selector value.
4601 * @param pIemCpu The per CPU data.
4602 * @param iSegReg The segment register.
4603 */
4604IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4605{
4606 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4607 switch (iSegReg)
4608 {
4609 case X86_SREG_ES: return pCtx->es.Sel;
4610 case X86_SREG_CS: return pCtx->cs.Sel;
4611 case X86_SREG_SS: return pCtx->ss.Sel;
4612 case X86_SREG_DS: return pCtx->ds.Sel;
4613 case X86_SREG_FS: return pCtx->fs.Sel;
4614 case X86_SREG_GS: return pCtx->gs.Sel;
4615 }
4616 AssertFailedReturn(0xffff);
4617}
4618
4619
4620/**
4621 * Gets a reference (pointer) to the specified general register.
4622 *
4623 * @returns Register reference.
4624 * @param pIemCpu The per CPU data.
4625 * @param iReg The general register.
4626 */
4627IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4628{
4629 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4630 switch (iReg)
4631 {
4632 case X86_GREG_xAX: return &pCtx->rax;
4633 case X86_GREG_xCX: return &pCtx->rcx;
4634 case X86_GREG_xDX: return &pCtx->rdx;
4635 case X86_GREG_xBX: return &pCtx->rbx;
4636 case X86_GREG_xSP: return &pCtx->rsp;
4637 case X86_GREG_xBP: return &pCtx->rbp;
4638 case X86_GREG_xSI: return &pCtx->rsi;
4639 case X86_GREG_xDI: return &pCtx->rdi;
4640 case X86_GREG_x8: return &pCtx->r8;
4641 case X86_GREG_x9: return &pCtx->r9;
4642 case X86_GREG_x10: return &pCtx->r10;
4643 case X86_GREG_x11: return &pCtx->r11;
4644 case X86_GREG_x12: return &pCtx->r12;
4645 case X86_GREG_x13: return &pCtx->r13;
4646 case X86_GREG_x14: return &pCtx->r14;
4647 case X86_GREG_x15: return &pCtx->r15;
4648 }
4649 AssertFailedReturn(NULL);
4650}
4651
4652
4653/**
4654 * Gets a reference (pointer) to the specified 8-bit general register.
4655 *
4656 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4657 *
4658 * @returns Register reference.
4659 * @param pIemCpu The per CPU data.
4660 * @param iReg The register.
4661 */
4662IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4663{
4664 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4665 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4666
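    /* Without a REX prefix, byte register encodings 4-7 select AH, CH, DH and BH,
       i.e. the high byte of the first four GPRs - hence the masking and +1 below. */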
4667 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4668 if (iReg >= 4)
4669 pu8Reg++;
4670 return pu8Reg;
4671}
4672
4673
4674/**
4675 * Fetches the value of an 8-bit general register.
4676 *
4677 * @returns The register value.
4678 * @param pIemCpu The per CPU data.
4679 * @param iReg The register.
4680 */
4681IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4682{
4683 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4684 return *pbSrc;
4685}
4686
4687
4688/**
4689 * Fetches the value of a 16-bit general register.
4690 *
4691 * @returns The register value.
4692 * @param pIemCpu The per CPU data.
4693 * @param iReg The register.
4694 */
4695IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4696{
4697 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4698}
4699
4700
4701/**
4702 * Fetches the value of a 32-bit general register.
4703 *
4704 * @returns The register value.
4705 * @param pIemCpu The per CPU data.
4706 * @param iReg The register.
4707 */
4708IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4709{
4710 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4711}
4712
4713
4714/**
4715 * Fetches the value of a 64-bit general register.
4716 *
4717 * @returns The register value.
4718 * @param pIemCpu The per CPU data.
4719 * @param iReg The register.
4720 */
4721IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4722{
4723 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4724}
4725
4726
4727/**
4728 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4729 *
4730 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4731 * segment limit.
4732 *
 * @returns Strict VBox status code.
4733 * @param pIemCpu The per CPU data.
4734 * @param offNextInstr The offset of the next instruction.
4735 */
4736IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4737{
4738 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
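    /* The displacement is relative to the next instruction: pIemCpu->offOpcode is the
       number of opcode bytes fetched so far, i.e. the length of the current instruction. */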
4739 switch (pIemCpu->enmEffOpSize)
4740 {
4741 case IEMMODE_16BIT:
4742 {
4743 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4744 if ( uNewIp > pCtx->cs.u32Limit
4745 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4746 return iemRaiseGeneralProtectionFault0(pIemCpu);
4747 pCtx->rip = uNewIp;
4748 break;
4749 }
4750
4751 case IEMMODE_32BIT:
4752 {
4753 Assert(pCtx->rip <= UINT32_MAX);
4754 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4755
4756 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4757 if (uNewEip > pCtx->cs.u32Limit)
4758 return iemRaiseGeneralProtectionFault0(pIemCpu);
4759 pCtx->rip = uNewEip;
4760 break;
4761 }
4762
4763 case IEMMODE_64BIT:
4764 {
4765 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4766
4767 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4768 if (!IEM_IS_CANONICAL(uNewRip))
4769 return iemRaiseGeneralProtectionFault0(pIemCpu);
4770 pCtx->rip = uNewRip;
4771 break;
4772 }
4773
4774 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4775 }
4776
4777 pCtx->eflags.Bits.u1RF = 0;
4778 return VINF_SUCCESS;
4779}
4780
4781
4782/**
4783 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4784 *
4785 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4786 * segment limit.
4787 *
4788 * @returns Strict VBox status code.
4789 * @param pIemCpu The per CPU data.
4790 * @param offNextInstr The offset of the next instruction.
4791 */
4792IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4793{
4794 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4795 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4796
4797 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4798 if ( uNewIp > pCtx->cs.u32Limit
4799 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4800 return iemRaiseGeneralProtectionFault0(pIemCpu);
4801 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4802 pCtx->rip = uNewIp;
4803 pCtx->eflags.Bits.u1RF = 0;
4804
4805 return VINF_SUCCESS;
4806}
4807
4808
4809/**
4810 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4811 *
4812 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4813 * segment limit.
4814 *
4815 * @returns Strict VBox status code.
4816 * @param pIemCpu The per CPU data.
4817 * @param offNextInstr The offset of the next instruction.
4818 */
4819IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4820{
4821 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4822 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4823
4824 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4825 {
4826 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4827
4828 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4829 if (uNewEip > pCtx->cs.u32Limit)
4830 return iemRaiseGeneralProtectionFault0(pIemCpu);
4831 pCtx->rip = uNewEip;
4832 }
4833 else
4834 {
4835 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4836
4837 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4838 if (!IEM_IS_CANONICAL(uNewRip))
4839 return iemRaiseGeneralProtectionFault0(pIemCpu);
4840 pCtx->rip = uNewRip;
4841 }
4842 pCtx->eflags.Bits.u1RF = 0;
4843 return VINF_SUCCESS;
4844}
4845
4846
4847/**
4848 * Performs a near jump to the specified address.
4849 *
4850 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4851 * segment limit.
4852 *
 * @returns Strict VBox status code.
4853 * @param pIemCpu The per CPU data.
4854 * @param uNewRip The new RIP value.
4855 */
4856IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4857{
4858 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4859 switch (pIemCpu->enmEffOpSize)
4860 {
4861 case IEMMODE_16BIT:
4862 {
4863 Assert(uNewRip <= UINT16_MAX);
4864 if ( uNewRip > pCtx->cs.u32Limit
4865 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4866 return iemRaiseGeneralProtectionFault0(pIemCpu);
4867 /** @todo Test 16-bit jump in 64-bit mode. */
4868 pCtx->rip = uNewRip;
4869 break;
4870 }
4871
4872 case IEMMODE_32BIT:
4873 {
4874 Assert(uNewRip <= UINT32_MAX);
4875 Assert(pCtx->rip <= UINT32_MAX);
4876 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4877
4878 if (uNewRip > pCtx->cs.u32Limit)
4879 return iemRaiseGeneralProtectionFault0(pIemCpu);
4880 pCtx->rip = uNewRip;
4881 break;
4882 }
4883
4884 case IEMMODE_64BIT:
4885 {
4886 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4887
4888 if (!IEM_IS_CANONICAL(uNewRip))
4889 return iemRaiseGeneralProtectionFault0(pIemCpu);
4890 pCtx->rip = uNewRip;
4891 break;
4892 }
4893
4894 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4895 }
4896
4897 pCtx->eflags.Bits.u1RF = 0;
4898 return VINF_SUCCESS;
4899}
4900
4901
4902/**
4903 * Gets the address of the top of the stack.
4904 *
 * @returns The effective top of stack address (SP, ESP or RSP depending on the mode).
4905 * @param pIemCpu The per CPU data.
4906 * @param pCtx The CPU context which SP/ESP/RSP should be
4907 * read.
4908 */
4909DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4910{
4911 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4912 return pCtx->rsp;
4913 if (pCtx->ss.Attr.n.u1DefBig)
4914 return pCtx->esp;
4915 return pCtx->sp;
4916}
4917
4918
4919/**
4920 * Updates the RIP/EIP/IP to point to the next instruction.
4921 *
4922 * This function leaves the EFLAGS.RF flag alone.
4923 *
4924 * @param pIemCpu The per CPU data.
4925 * @param cbInstr The number of bytes to add.
4926 */
4927IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4928{
4929 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4930 switch (pIemCpu->enmCpuMode)
4931 {
4932 case IEMMODE_16BIT:
4933 Assert(pCtx->rip <= UINT16_MAX);
4934 pCtx->eip += cbInstr;
4935 pCtx->eip &= UINT32_C(0xffff);
4936 break;
4937
4938 case IEMMODE_32BIT:
4939 pCtx->eip += cbInstr;
4940 Assert(pCtx->rip <= UINT32_MAX);
4941 break;
4942
4943 case IEMMODE_64BIT:
4944 pCtx->rip += cbInstr;
4945 break;
4946 default: AssertFailed();
4947 }
4948}
4949
4950
4951#if 0
4952/**
4953 * Updates the RIP/EIP/IP to point to the next instruction.
4954 *
4955 * @param pIemCpu The per CPU data.
4956 */
4957IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4958{
4959 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4960}
4961#endif
4962
4963
4964
4965/**
4966 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4967 *
4968 * @param pIemCpu The per CPU data.
4969 * @param cbInstr The number of bytes to add.
4970 */
4971IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4972{
4973 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4974
4975 pCtx->eflags.Bits.u1RF = 0;
4976
4977 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4978 switch (pIemCpu->enmCpuMode)
4979 {
4980 /** @todo investigate if EIP or RIP is really incremented. */
4981 case IEMMODE_16BIT:
4982 case IEMMODE_32BIT:
4983 pCtx->eip += cbInstr;
4984 Assert(pCtx->rip <= UINT32_MAX);
4985 break;
4986
4987 case IEMMODE_64BIT:
4988 pCtx->rip += cbInstr;
4989 break;
4990 default: AssertFailed();
4991 }
4992}
4993
4994
4995/**
4996 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4997 *
4998 * @param pIemCpu The per CPU data.
4999 */
5000IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
5001{
5002 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
5003}
5004
5005
5006/**
5007 * Adds to the stack pointer.
5008 *
5009 * @param pIemCpu The per CPU data.
5010 * @param pCtx The CPU context which SP/ESP/RSP should be
5011 * updated.
5012 * @param cbToAdd The number of bytes to add.
5013 */
5014DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
5015{
5016 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5017 pCtx->rsp += cbToAdd;
5018 else if (pCtx->ss.Attr.n.u1DefBig)
5019 pCtx->esp += cbToAdd;
5020 else
5021 pCtx->sp += cbToAdd;
5022}
5023
5024
5025/**
5026 * Subtracts from the stack pointer.
5027 *
5028 * @param pIemCpu The per CPU data.
5029 * @param pCtx The CPU context which SP/ESP/RSP should be
5030 * updated.
5031 * @param cbToSub The number of bytes to subtract.
5032 */
5033DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
5034{
5035 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5036 pCtx->rsp -= cbToSub;
5037 else if (pCtx->ss.Attr.n.u1DefBig)
5038 pCtx->esp -= cbToSub;
5039 else
5040 pCtx->sp -= cbToSub;
5041}
5042
5043
5044/**
5045 * Adds to the temporary stack pointer.
5046 *
5047 * @param pIemCpu The per CPU data.
5048 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5049 * @param cbToAdd The number of bytes to add.
5050 * @param pCtx Where to get the current stack mode.
5051 */
5052DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
5053{
5054 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5055 pTmpRsp->u += cbToAdd;
5056 else if (pCtx->ss.Attr.n.u1DefBig)
5057 pTmpRsp->DWords.dw0 += cbToAdd;
5058 else
5059 pTmpRsp->Words.w0 += cbToAdd;
5060}
5061
5062
5063/**
5064 * Subtracts from the temporary stack pointer.
5065 *
5066 * @param pIemCpu The per CPU data.
5067 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5068 * @param cbToSub The number of bytes to subtract.
5069 * @param pCtx Where to get the current stack mode.
5070 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5071 * expecting that.
5072 */
5073DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5074{
5075 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5076 pTmpRsp->u -= cbToSub;
5077 else if (pCtx->ss.Attr.n.u1DefBig)
5078 pTmpRsp->DWords.dw0 -= cbToSub;
5079 else
5080 pTmpRsp->Words.w0 -= cbToSub;
5081}
5082
5083
5084/**
5085 * Calculates the effective stack address for a push of the specified size as
5086 * well as the new RSP value (upper bits may be masked).
5087 *
5088 * @returns Effective stack address for the push.
5089 * @param pIemCpu The IEM per CPU data.
5090 * @param pCtx Where to get the current stack mode.
5091 * @param cbItem The size of the stack item to push.
5092 * @param puNewRsp Where to return the new RSP value.
5093 */
5094DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5095{
5096 RTUINT64U uTmpRsp;
5097 RTGCPTR GCPtrTop;
5098 uTmpRsp.u = pCtx->rsp;
5099
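    /* Only the part of RSP addressed by the current stack attributes (SP, ESP or RSP)
       is updated; the remaining bits pass through the union unchanged. */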
5100 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5101 GCPtrTop = uTmpRsp.u -= cbItem;
5102 else if (pCtx->ss.Attr.n.u1DefBig)
5103 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5104 else
5105 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5106 *puNewRsp = uTmpRsp.u;
5107 return GCPtrTop;
5108}
5109
5110
5111/**
5112 * Gets the current stack pointer and calculates the value after a pop of the
5113 * specified size.
5114 *
5115 * @returns Current stack pointer.
5116 * @param pIemCpu The per CPU data.
5117 * @param pCtx Where to get the current stack mode.
5118 * @param cbItem The size of the stack item to pop.
5119 * @param puNewRsp Where to return the new RSP value.
5120 */
5121DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5122{
5123 RTUINT64U uTmpRsp;
5124 RTGCPTR GCPtrTop;
5125 uTmpRsp.u = pCtx->rsp;
5126
5127 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5128 {
5129 GCPtrTop = uTmpRsp.u;
5130 uTmpRsp.u += cbItem;
5131 }
5132 else if (pCtx->ss.Attr.n.u1DefBig)
5133 {
5134 GCPtrTop = uTmpRsp.DWords.dw0;
5135 uTmpRsp.DWords.dw0 += cbItem;
5136 }
5137 else
5138 {
5139 GCPtrTop = uTmpRsp.Words.w0;
5140 uTmpRsp.Words.w0 += cbItem;
5141 }
5142 *puNewRsp = uTmpRsp.u;
5143 return GCPtrTop;
5144}
5145
5146
5147/**
5148 * Calculates the effective stack address for a push of the specified size as
5149 * well as the new temporary RSP value (upper bits may be masked).
5150 *
5151 * @returns Effective stack address for the push.
5152 * @param pIemCpu The per CPU data.
5153 * @param pCtx Where to get the current stack mode.
5154 * @param pTmpRsp The temporary stack pointer. This is updated.
5155 * @param cbItem The size of the stack item to push.
5156 */
5157DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5158{
5159 RTGCPTR GCPtrTop;
5160
5161 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5162 GCPtrTop = pTmpRsp->u -= cbItem;
5163 else if (pCtx->ss.Attr.n.u1DefBig)
5164 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5165 else
5166 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5167 return GCPtrTop;
5168}
5169
5170
5171/**
5172 * Gets the effective stack address for a pop of the specified size and
5173 * calculates and updates the temporary RSP.
5174 *
5175 * @returns Current stack pointer.
5176 * @param pIemCpu The per CPU data.
5177 * @param pCtx Where to get the current stack mode.
5178 * @param pTmpRsp The temporary stack pointer. This is updated.
5179 * @param cbItem The size of the stack item to pop.
5180 */
5181DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5182{
5183 RTGCPTR GCPtrTop;
5184 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5185 {
5186 GCPtrTop = pTmpRsp->u;
5187 pTmpRsp->u += cbItem;
5188 }
5189 else if (pCtx->ss.Attr.n.u1DefBig)
5190 {
5191 GCPtrTop = pTmpRsp->DWords.dw0;
5192 pTmpRsp->DWords.dw0 += cbItem;
5193 }
5194 else
5195 {
5196 GCPtrTop = pTmpRsp->Words.w0;
5197 pTmpRsp->Words.w0 += cbItem;
5198 }
5199 return GCPtrTop;
5200}
5201
5202/** @} */
5203
5204
5205/** @name FPU access and helpers.
5206 *
5207 * @{
5208 */
5209
5210
5211/**
5212 * Hook for preparing to use the host FPU.
5213 *
5214 * This is necessary in ring-0 and raw-mode context.
5215 *
5216 * @param pIemCpu The IEM per CPU data.
5217 */
5218DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5219{
5220#ifdef IN_RING3
5221 NOREF(pIemCpu);
5222#else
5223/** @todo RZ: FIXME */
5224//# error "Implement me"
5225#endif
5226}
5227
5228
5229/**
5230 * Hook for preparing to use the host FPU for SSE.
5231 *
5232 * This is necessary in ring-0 and raw-mode context.
5233 *
5234 * @param pIemCpu The IEM per CPU data.
5235 */
5236DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5237{
5238 iemFpuPrepareUsage(pIemCpu);
5239}
5240
5241
5242/**
5243 * Stores a QNaN value into a FPU register.
5244 *
5245 * @param pReg Pointer to the register.
5246 */
5247DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5248{
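    /* This is the QNaN floating-point indefinite: sign set, exponent all ones,
       mantissa 0xc000000000000000 (integer bit and topmost fraction bit set). */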
5249 pReg->au32[0] = UINT32_C(0x00000000);
5250 pReg->au32[1] = UINT32_C(0xc0000000);
5251 pReg->au16[4] = UINT16_C(0xffff);
5252}
5253
5254
5255/**
5256 * Updates the FOP, FPU.CS and FPUIP registers.
5257 *
5258 * @param pIemCpu The IEM per CPU data.
5259 * @param pCtx The CPU context.
5260 * @param pFpuCtx The FPU context.
5261 */
5262DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5263{
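    /* FOP is the 11-bit x87 opcode: the ModR/M byte in bits 0-7 and the low three
       bits of the ESC opcode byte (0xd8..0xdf) in bits 8-10. */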
5264 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5265 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5266 /** @todo x87.CS and FPUIP need to be kept separately. */
5267 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5268 {
5269 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5270 * happens in real mode here based on the fnsave and fnstenv images. */
5271 pFpuCtx->CS = 0;
5272 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5273 }
5274 else
5275 {
5276 pFpuCtx->CS = pCtx->cs.Sel;
5277 pFpuCtx->FPUIP = pCtx->rip;
5278 }
5279}
5280
5281
5282/**
5283 * Updates the x87.DS and FPUDP registers.
5284 *
5285 * @param pIemCpu The IEM per CPU data.
5286 * @param pCtx The CPU context.
5287 * @param pFpuCtx The FPU context.
5288 * @param iEffSeg The effective segment register.
5289 * @param GCPtrEff The effective address relative to @a iEffSeg.
5290 */
5291DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5292{
5293 RTSEL sel;
5294 switch (iEffSeg)
5295 {
5296 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5297 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5298 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5299 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5300 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5301 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5302 default:
5303 AssertMsgFailed(("%d\n", iEffSeg));
5304 sel = pCtx->ds.Sel;
5305 }
5306 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5307 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5308 {
5309 pFpuCtx->DS = 0;
5310 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5311 }
5312 else
5313 {
5314 pFpuCtx->DS = sel;
5315 pFpuCtx->FPUDP = GCPtrEff;
5316 }
5317}
5318
5319
5320/**
5321 * Rotates the stack registers in the push direction.
5322 *
5323 * @param pFpuCtx The FPU context.
5324 * @remarks This is a complete waste of time, but fxsave stores the registers in
5325 * stack order.
5326 */
5327DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5328{
5329 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5330 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5331 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5332 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5333 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5334 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5335 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5336 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5337 pFpuCtx->aRegs[0].r80 = r80Tmp;
5338}
5339
5340
5341/**
5342 * Rotates the stack registers in the pop direction.
5343 *
5344 * @param pFpuCtx The FPU context.
5345 * @remarks This is a complete waste of time, but fxsave stores the registers in
5346 * stack order.
5347 */
5348DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5349{
5350 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5351 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5352 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5353 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5354 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5355 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5356 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5357 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5358 pFpuCtx->aRegs[7].r80 = r80Tmp;
5359}
5360
5361
5362/**
5363 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5364 * exception prevents it.
5365 *
5366 * @param pIemCpu The IEM per CPU data.
5367 * @param pResult The FPU operation result to push.
5368 * @param pFpuCtx The FPU context.
5369 */
5370IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5371{
5372 /* Update FSW and bail if there are pending exceptions afterwards. */
5373 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5374 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5375 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5376 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5377 {
5378 pFpuCtx->FSW = fFsw;
5379 return;
5380 }
5381
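    /* A push decrements TOP; adding 7 and masking the 3-bit TOP field is the same
       as subtracting 1 modulo 8. */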
5382 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5383 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5384 {
5385 /* All is fine, push the actual value. */
5386 pFpuCtx->FTW |= RT_BIT(iNewTop);
5387 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5388 }
5389 else if (pFpuCtx->FCW & X86_FCW_IM)
5390 {
5391 /* Masked stack overflow, push QNaN. */
5392 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5393 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5394 }
5395 else
5396 {
5397 /* Raise stack overflow, don't push anything. */
5398 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5399 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5400 return;
5401 }
5402
5403 fFsw &= ~X86_FSW_TOP_MASK;
5404 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5405 pFpuCtx->FSW = fFsw;
5406
5407 iemFpuRotateStackPush(pFpuCtx);
5408}
5409
5410
5411/**
5412 * Stores a result in a FPU register and updates the FSW and FTW.
5413 *
5414 * @param pFpuCtx The FPU context.
5415 * @param pResult The result to store.
5416 * @param iStReg Which FPU register to store it in.
5417 */
5418IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5419{
5420 Assert(iStReg < 8);
5421 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5422 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5423 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5424 pFpuCtx->FTW |= RT_BIT(iReg);
5425 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5426}
5427
5428
5429/**
5430 * Only updates the FPU status word (FSW) with the result of the current
5431 * instruction.
5432 *
5433 * @param pFpuCtx The FPU context.
5434 * @param u16FSW The FSW output of the current instruction.
5435 */
5436IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5437{
5438 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5439 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5440}
5441
5442
5443/**
5444 * Pops one item off the FPU stack if no pending exception prevents it.
5445 *
5446 * @param pFpuCtx The FPU context.
5447 */
5448IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5449{
5450 /* Check pending exceptions. */
5451 uint16_t uFSW = pFpuCtx->FSW;
5452 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5453 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5454 return;
5455
5456 /* TOP++ (pop): adding 9 and masking the 3-bit TOP field is the same as adding 1 modulo 8. */
5457 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5458 uFSW &= ~X86_FSW_TOP_MASK;
5459 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5460 pFpuCtx->FSW = uFSW;
5461
5462 /* Mark the previous ST0 as empty. */
5463 iOldTop >>= X86_FSW_TOP_SHIFT;
5464 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5465
5466 /* Rotate the registers. */
5467 iemFpuRotateStackPop(pFpuCtx);
5468}
5469
5470
5471/**
5472 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5473 *
5474 * @param pIemCpu The IEM per CPU data.
5475 * @param pResult The FPU operation result to push.
5476 */
5477IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5478{
5479 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5480 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5481 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5482 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5483}
5484
5485
5486/**
5487 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5488 * and sets FPUDP and FPUDS.
5489 *
5490 * @param pIemCpu The IEM per CPU data.
5491 * @param pResult The FPU operation result to push.
5492 * @param iEffSeg The effective segment register.
5493 * @param GCPtrEff The effective address relative to @a iEffSeg.
5494 */
5495IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5496{
5497 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5498 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5499 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5500 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5501 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5502}
5503
5504
5505/**
5506 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5507 * unless a pending exception prevents it.
5508 *
5509 * @param pIemCpu The IEM per CPU data.
5510 * @param pResult The FPU operation result to store and push.
5511 */
5512IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5513{
5514 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5515 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5516 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5517
5518 /* Update FSW and bail if there are pending exceptions afterwards. */
5519 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5520 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5521 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5522 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5523 {
5524 pFpuCtx->FSW = fFsw;
5525 return;
5526 }
5527
5528 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5529 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5530 {
5531 /* All is fine, push the actual value. */
5532 pFpuCtx->FTW |= RT_BIT(iNewTop);
5533 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5534 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5535 }
5536 else if (pFpuCtx->FCW & X86_FCW_IM)
5537 {
5538 /* Masked stack overflow, push QNaN. */
5539 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5540 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5541 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5542 }
5543 else
5544 {
5545 /* Raise stack overflow, don't push anything. */
5546 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5547 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5548 return;
5549 }
5550
5551 fFsw &= ~X86_FSW_TOP_MASK;
5552 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5553 pFpuCtx->FSW = fFsw;
5554
5555 iemFpuRotateStackPush(pFpuCtx);
5556}
5557
5558
5559/**
5560 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5561 * FOP.
5562 *
5563 * @param pIemCpu The IEM per CPU data.
5564 * @param pResult The result to store.
5565 * @param iStReg Which FPU register to store it in.
5566 */
5567IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5568{
5569 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5570 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5571 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5572 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5573}
5574
5575
5576/**
5577 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5578 * FOP, and then pops the stack.
5579 *
5580 * @param pIemCpu The IEM per CPU data.
5581 * @param pResult The result to store.
5582 * @param iStReg Which FPU register to store it in.
5583 */
5584IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5585{
5586 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5587 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5588 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5589 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5590 iemFpuMaybePopOne(pFpuCtx);
5591}
5592
5593
5594/**
5595 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5596 * FPUDP, and FPUDS.
5597 *
5598 * @param pIemCpu The IEM per CPU data.
5599 * @param pResult The result to store.
5600 * @param iStReg Which FPU register to store it in.
5601 * @param iEffSeg The effective memory operand selector register.
5602 * @param GCPtrEff The effective memory operand offset.
5603 */
5604IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5605 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5606{
5607 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5608 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5609 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5610 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5611 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5612}
5613
5614
5615/**
5616 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5617 * FPUDP, and FPUDS, and then pops the stack.
5618 *
5619 * @param pIemCpu The IEM per CPU data.
5620 * @param pResult The result to store.
5621 * @param iStReg Which FPU register to store it in.
5622 * @param iEffSeg The effective memory operand selector register.
5623 * @param GCPtrEff The effective memory operand offset.
5624 */
5625IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5626 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5627{
5628 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5629 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5630 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5631 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5632 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5633 iemFpuMaybePopOne(pFpuCtx);
5634}
5635
5636
5637/**
5638 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5639 *
5640 * @param pIemCpu The IEM per CPU data.
5641 */
5642IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5643{
5644 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5645 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5646 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5647}
5648
5649
5650/**
5651 * Marks the specified stack register as free (for FFREE).
5652 *
5653 * @param pIemCpu The IEM per CPU data.
5654 * @param iStReg The register to free.
5655 */
5656IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5657{
5658 Assert(iStReg < 8);
5659 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5660 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5661 pFpuCtx->FTW &= ~RT_BIT(iReg);
5662}
5663
5664
5665/**
5666 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5667 *
5668 * @param pIemCpu The IEM per CPU data.
5669 */
5670IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5671{
5672 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5673 uint16_t uFsw = pFpuCtx->FSW;
5674 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5675 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5676 uFsw &= ~X86_FSW_TOP_MASK;
5677 uFsw |= uTop;
5678 pFpuCtx->FSW = uFsw;
5679}
5680
5681
5682/**
5683 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5684 *
5685 * @param pIemCpu The IEM per CPU data.
5686 */
5687IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5688{
5689 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5690 uint16_t uFsw = pFpuCtx->FSW;
5691 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5692 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5693 uFsw &= ~X86_FSW_TOP_MASK;
5694 uFsw |= uTop;
5695 pFpuCtx->FSW = uFsw;
5696}
5697
5698
5699/**
5700 * Updates the FSW, FOP, FPUIP, and FPUCS.
5701 *
5702 * @param pIemCpu The IEM per CPU data.
5703 * @param u16FSW The FSW from the current instruction.
5704 */
5705IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5706{
5707 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5708 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5709 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5710 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5711}
5712
5713
5714/**
5715 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5716 *
5717 * @param pIemCpu The IEM per CPU data.
5718 * @param u16FSW The FSW from the current instruction.
5719 */
5720IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5721{
5722 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5723 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5724 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5725 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5726 iemFpuMaybePopOne(pFpuCtx);
5727}
5728
5729
5730/**
5731 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5732 *
5733 * @param pIemCpu The IEM per CPU data.
5734 * @param u16FSW The FSW from the current instruction.
5735 * @param iEffSeg The effective memory operand selector register.
5736 * @param GCPtrEff The effective memory operand offset.
5737 */
5738IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5739{
5740 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5741 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5742 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5743 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5744 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5745}
5746
5747
5748/**
5749 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5750 *
5751 * @param pIemCpu The IEM per CPU data.
5752 * @param u16FSW The FSW from the current instruction.
5753 */
5754IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5755{
5756 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5757 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5758 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5759 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5760 iemFpuMaybePopOne(pFpuCtx);
5761 iemFpuMaybePopOne(pFpuCtx);
5762}
5763
5764
5765/**
5766 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5767 *
5768 * @param pIemCpu The IEM per CPU data.
5769 * @param u16FSW The FSW from the current instruction.
5770 * @param iEffSeg The effective memory operand selector register.
5771 * @param GCPtrEff The effective memory operand offset.
5772 */
5773IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5774{
5775 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5776 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5777 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5778 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5779 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5780 iemFpuMaybePopOne(pFpuCtx);
5781}
5782
5783
5784/**
5785 * Worker routine for raising an FPU stack underflow exception.
5786 *
5787 * @param pIemCpu The IEM per CPU data.
5788 * @param pFpuCtx The FPU context.
5789 * @param iStReg The stack register being accessed.
5790 */
5791IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5792{
5793 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5794 if (pFpuCtx->FCW & X86_FCW_IM)
5795 {
5796 /* Masked underflow. */
5797 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5798 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5799 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5800 if (iStReg != UINT8_MAX)
5801 {
5802 pFpuCtx->FTW |= RT_BIT(iReg);
5803 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5804 }
5805 }
5806 else
5807 {
5808 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5809 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5810 }
5811}
5812
5813
5814/**
5815 * Raises a FPU stack underflow exception.
5816 *
5817 * @param pIemCpu The IEM per CPU data.
5818 * @param iStReg The destination register that should be loaded
5819 * with QNaN if \#IS is not masked. Specify
5820 * UINT8_MAX if none (like for fcom).
5821 */
5822DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5823{
5824 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5825 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5826 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5827 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5828}
5829
5830
5831DECL_NO_INLINE(IEM_STATIC, void)
5832iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5833{
5834 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5835 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5836 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5837 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5838 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5839}
5840
5841
5842DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5843{
5844 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5845 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5846 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5847 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5848 iemFpuMaybePopOne(pFpuCtx);
5849}
5850
5851
5852DECL_NO_INLINE(IEM_STATIC, void)
5853iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5854{
5855 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5856 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5857 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5858 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5859 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5860 iemFpuMaybePopOne(pFpuCtx);
5861}
5862
5863
5864DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5865{
5866 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5867 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5868 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5869 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5870 iemFpuMaybePopOne(pFpuCtx);
5871 iemFpuMaybePopOne(pFpuCtx);
5872}
5873
5874
5875DECL_NO_INLINE(IEM_STATIC, void)
5876iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5877{
5878 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5879 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5880 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5881
5882 if (pFpuCtx->FCW & X86_FCW_IM)
5883 {
5884 /* Masked stack underflow - push QNaN. */
5885 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5886 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5887 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5888 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5889 pFpuCtx->FTW |= RT_BIT(iNewTop);
5890 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5891 iemFpuRotateStackPush(pFpuCtx);
5892 }
5893 else
5894 {
5895 /* Exception pending - don't change TOP or the register stack. */
5896 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5897 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5898 }
5899}
5900
5901
5902DECL_NO_INLINE(IEM_STATIC, void)
5903iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5904{
5905 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5906 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5907 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5908
5909 if (pFpuCtx->FCW & X86_FCW_IM)
5910 {
5911 /* Masked stack underflow - push QNaN. */
5912 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5913 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5914 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5915 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5916 pFpuCtx->FTW |= RT_BIT(iNewTop);
5917 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5918 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5919 iemFpuRotateStackPush(pFpuCtx);
5920 }
5921 else
5922 {
5923 /* Exception pending - don't change TOP or the register stack. */
5924 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5925 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5926 }
5927}
5928
5929
5930/**
5931 * Worker routine for raising an FPU stack overflow exception on a push.
5932 *
5933 * @param pFpuCtx The FPU context.
5934 */
5935IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5936{
5937 if (pFpuCtx->FCW & X86_FCW_IM)
5938 {
5939 /* Masked overflow. */
5940 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5941 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5942 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5943 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5944 pFpuCtx->FTW |= RT_BIT(iNewTop);
5945 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5946 iemFpuRotateStackPush(pFpuCtx);
5947 }
5948 else
5949 {
5950 /* Exception pending - don't change TOP or the register stack. */
5951 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5952 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5953 }
5954}
5955
5956
5957/**
5958 * Raises a FPU stack overflow exception on a push.
5959 *
5960 * @param pIemCpu The IEM per CPU data.
5961 */
5962DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5963{
5964 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5965 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5966 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5967 iemFpuStackPushOverflowOnly(pFpuCtx);
5968}
5969
5970
5971/**
5972 * Raises a FPU stack overflow exception on a push with a memory operand.
5973 *
5974 * @param pIemCpu The IEM per CPU data.
5975 * @param iEffSeg The effective memory operand selector register.
5976 * @param GCPtrEff The effective memory operand offset.
5977 */
5978DECL_NO_INLINE(IEM_STATIC, void)
5979iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5980{
5981 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5982 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5983 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5984 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5985 iemFpuStackPushOverflowOnly(pFpuCtx);
5986}
5987
5988
5989IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5990{
5991 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5992 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5993 if (pFpuCtx->FTW & RT_BIT(iReg))
5994 return VINF_SUCCESS;
5995 return VERR_NOT_FOUND;
5996}
5997
5998
5999IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
6000{
6001 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6002 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6003 if (pFpuCtx->FTW & RT_BIT(iReg))
6004 {
6005 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
6006 return VINF_SUCCESS;
6007 }
6008 return VERR_NOT_FOUND;
6009}
6010
6011
6012IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
6013 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
6014{
6015 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6016 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6017 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6018 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6019 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6020 {
6021 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6022 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
6023 return VINF_SUCCESS;
6024 }
6025 return VERR_NOT_FOUND;
6026}
6027
6028
6029IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
6030{
6031 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6032 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6033 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6034 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6035 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6036 {
6037 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6038 return VINF_SUCCESS;
6039 }
6040 return VERR_NOT_FOUND;
6041}
6042
6043
6044/**
6045 * Updates the FPU exception status after FCW is changed.
6046 *
6047 * @param pFpuCtx The FPU context.
6048 */
6049IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
6050{
6051 uint16_t u16Fsw = pFpuCtx->FSW;
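    /* FSW.ES is the exception summary bit: set when any exception flag is set whose
       corresponding mask bit in FCW is clear. FSW.B simply mirrors ES on modern CPUs. */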
6052 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
6053 u16Fsw |= X86_FSW_ES | X86_FSW_B;
6054 else
6055 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
6056 pFpuCtx->FSW = u16Fsw;
6057}
6058
6059
6060/**
6061 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6062 *
6063 * @returns The full FTW.
6064 * @param pFpuCtx The FPU context.
6065 */
6066IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6067{
6068 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6069 uint16_t u16Ftw = 0;
6070 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
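    /* The full FTW uses two bits per register: 00 = valid, 01 = zero, 10 = special
       (NaN, infinity, denormal, unnormal), 11 = empty. The compressed form kept in
       pFpuCtx->FTW only records empty (0) vs. in use (1). */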
6071 for (unsigned iSt = 0; iSt < 8; iSt++)
6072 {
6073 unsigned const iReg = (iSt + iTop) & 7;
6074 if (!(u8Ftw & RT_BIT(iReg)))
6075 u16Ftw |= 3 << (iReg * 2); /* empty */
6076 else
6077 {
6078 uint16_t uTag;
6079 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6080 if (pr80Reg->s.uExponent == 0x7fff)
6081 uTag = 2; /* Exponent is all 1's => Special. */
6082 else if (pr80Reg->s.uExponent == 0x0000)
6083 {
6084 if (pr80Reg->s.u64Mantissa == 0x0000)
6085 uTag = 1; /* All bits are zero => Zero. */
6086 else
6087 uTag = 2; /* Must be special. */
6088 }
6089 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6090 uTag = 0; /* Valid. */
6091 else
6092 uTag = 2; /* Must be special. */
6093
6094 u16Ftw |= uTag << (iReg * 2);
6095 }
6096 }
6097
6098 return u16Ftw;
6099}
6100
6101
6102/**
6103 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6104 *
6105 * @returns The compressed FTW.
6106 * @param u16FullFtw The full FTW to convert.
6107 */
6108IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6109{
6110 uint8_t u8Ftw = 0;
6111 for (unsigned i = 0; i < 8; i++)
6112 {
6113 if ((u16FullFtw & 3) != 3 /*empty*/)
6114 u8Ftw |= RT_BIT(i);
6115 u16FullFtw >>= 2;
6116 }
6117
6118 return u8Ftw;
6119}
6120
6121/** @} */
6122
6123
6124/** @name Memory access.
6125 *
6126 * @{
6127 */
6128
6129
6130/**
6131 * Updates the IEMCPU::cbWritten counter if applicable.
6132 *
6133 * @param pIemCpu The IEM per CPU data.
6134 * @param fAccess The access being accounted for.
6135 * @param cbMem The access size.
6136 */
6137DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6138{
6139 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6140 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6141 pIemCpu->cbWritten += (uint32_t)cbMem;
6142}
6143
6144
6145/**
6146 * Checks if the given segment can be written to, raising the appropriate
6147 * exception if not.
6148 *
6149 * @returns VBox strict status code.
6150 *
6151 * @param pIemCpu The IEM per CPU data.
6152 * @param pHid Pointer to the hidden register.
6153 * @param iSegReg The register number.
6154 * @param pu64BaseAddr Where to return the base address to use for the
6155 * segment. (In 64-bit code it may differ from the
6156 * base in the hidden segment.)
6157 */
6158IEM_STATIC VBOXSTRICTRC
6159iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6160{
6161 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6162 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6163 else
6164 {
6165 if (!pHid->Attr.n.u1Present)
6166 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6167
6168 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6169 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6170 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6171 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6172 *pu64BaseAddr = pHid->u64Base;
6173 }
6174 return VINF_SUCCESS;
6175}
6176
6177
6178/**
6179 * Checks if the given segment can be read from, raising the appropriate
6180 * exception if not.
6181 *
6182 * @returns VBox strict status code.
6183 *
6184 * @param pIemCpu The IEM per CPU data.
6185 * @param pHid Pointer to the hidden register.
6186 * @param iSegReg The register number.
6187 * @param pu64BaseAddr Where to return the base address to use for the
6188 * segment. (In 64-bit code it may differ from the
6189 * base in the hidden segment.)
6190 */
6191IEM_STATIC VBOXSTRICTRC
6192iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6193{
6194 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6195 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6196 else
6197 {
6198 if (!pHid->Attr.n.u1Present)
6199 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6200
6201 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6202 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6203 *pu64BaseAddr = pHid->u64Base;
6204 }
6205 return VINF_SUCCESS;
6206}
6207
6208
6209/**
6210 * Applies the segment limit, base and attributes.
6211 *
6212 * This may raise a \#GP or \#SS.
6213 *
6214 * @returns VBox strict status code.
6215 *
6216 * @param pIemCpu The IEM per CPU data.
6217 * @param fAccess The kind of access which is being performed.
6218 * @param iSegReg The index of the segment register to apply.
6219 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6220 * TSS, ++).
6221 * @param cbMem The access size.
6222 * @param pGCPtrMem Pointer to the guest memory address to apply
6223 * segmentation to. Input and output parameter.
6224 */
6225IEM_STATIC VBOXSTRICTRC
6226iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6227{
6228 if (iSegReg == UINT8_MAX)
6229 return VINF_SUCCESS;
6230
6231 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6232 switch (pIemCpu->enmCpuMode)
6233 {
6234 case IEMMODE_16BIT:
6235 case IEMMODE_32BIT:
6236 {
6237 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6238 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6239
6240 if ( pSel->Attr.n.u1Present
6241 && !pSel->Attr.n.u1Unusable)
6242 {
6243 Assert(pSel->Attr.n.u1DescType);
6244 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6245 {
6246 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6247 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6248 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6249
6250 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6251 {
6252 /** @todo CPL check. */
6253 }
6254
6255 /*
6256 * There are two kinds of data selectors, normal and expand down.
6257 */
6258 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6259 {
6260 if ( GCPtrFirst32 > pSel->u32Limit
6261 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6262 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6263 }
6264 else
6265 {
6266 /*
6267 * The upper boundary is defined by the B bit, not the G bit!
6268 */
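                    /* Expand-down: valid offsets are (limit, 0xffff] or (limit, 0xffffffff]
                       depending on the B bit; anything at or below the limit faults. */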
6269 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6270 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6271 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6272 }
6273 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6274 }
6275 else
6276 {
6277
6278 /*
6279 * Code selectors can usually be used to read through; writing is
6280 * only permitted in real and V8086 mode.
6281 */
6282 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6283 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6284 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6285 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6286 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6287
6288 if ( GCPtrFirst32 > pSel->u32Limit
6289 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6290 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6291
6292 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6293 {
6294 /** @todo CPL check. */
6295 }
6296
6297 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6298 }
6299 }
6300 else
6301 return iemRaiseGeneralProtectionFault0(pIemCpu);
6302 return VINF_SUCCESS;
6303 }
6304
6305 case IEMMODE_64BIT:
6306 {
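            /* In 64-bit mode limit checks are not performed; only FS and GS contribute
               a segment base and the resulting address merely has to be canonical. */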
6307 RTGCPTR GCPtrMem = *pGCPtrMem;
6308 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6309 *pGCPtrMem = GCPtrMem + pSel->u64Base;
6310
6311 Assert(cbMem >= 1);
6312 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
6313 return VINF_SUCCESS;
6314 return iemRaiseGeneralProtectionFault0(pIemCpu);
6315 }
6316
6317 default:
6318 AssertFailedReturn(VERR_IEM_IPE_7);
6319 }
6320}
6321
6322
6323/**
6324 * Translates a virtual address to a physical address and checks if we
6325 * can access the page as specified.
6326 *
6327 * @param pIemCpu The IEM per CPU data.
6328 * @param GCPtrMem The virtual address.
6329 * @param fAccess The intended access.
6330 * @param pGCPhysMem Where to return the physical address.
6331 */
6332IEM_STATIC VBOXSTRICTRC
6333iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6334{
6335 /** @todo Need a different PGM interface here. We're currently using
6336 * generic / REM interfaces. this won't cut it for R0 & RC. */
6337 RTGCPHYS GCPhys;
6338 uint64_t fFlags;
6339 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6340 if (RT_FAILURE(rc))
6341 {
6342 /** @todo Check unassigned memory in unpaged mode. */
6343 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6344 *pGCPhysMem = NIL_RTGCPHYS;
6345 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6346 }
6347
6348 /* If the page is writable, user accessible and does not have the no-exec
6349 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
6350 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6351 {
6352 /* Write to read only memory? */
6353 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6354 && !(fFlags & X86_PTE_RW)
6355 && ( pIemCpu->uCpl != 0
6356 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6357 {
6358 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6359 *pGCPhysMem = NIL_RTGCPHYS;
6360 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6361 }
6362
6363 /* Kernel memory accessed by userland? */
6364 if ( !(fFlags & X86_PTE_US)
6365 && pIemCpu->uCpl == 3
6366 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6367 {
6368 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6369 *pGCPhysMem = NIL_RTGCPHYS;
6370 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6371 }
6372
6373 /* Executing non-executable memory? */
6374 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6375 && (fFlags & X86_PTE_PAE_NX)
6376 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6377 {
6378 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6379 *pGCPhysMem = NIL_RTGCPHYS;
6380 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6381 VERR_ACCESS_DENIED);
6382 }
6383 }
6384
6385 /*
6386 * Set the dirty / access flags.
6387 * ASSUMES this is set when the address is translated rather than on commit...
6388 */
6389 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6390 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6391 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6392 {
6393 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6394 AssertRC(rc2);
6395 }
6396
6397 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6398 *pGCPhysMem = GCPhys;
6399 return VINF_SUCCESS;
6400}
6401
6402
6403
6404/**
6405 * Maps a physical page.
6406 *
6407 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6408 * @param pIemCpu The IEM per CPU data.
6409 * @param GCPhysMem The physical address.
6410 * @param fAccess The intended access.
6411 * @param ppvMem Where to return the mapping address.
6412 * @param pLock The PGM lock.
6413 */
6414IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6415{
6416#ifdef IEM_VERIFICATION_MODE_FULL
6417 /* Force the alternative path so we can ignore writes. */
6418 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6419 {
6420 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6421 {
6422 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6423 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6424 if (RT_FAILURE(rc2))
6425 pIemCpu->fProblematicMemory = true;
6426 }
6427 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6428 }
6429#endif
6430#ifdef IEM_LOG_MEMORY_WRITES
6431 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6432 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6433#endif
6434#ifdef IEM_VERIFICATION_MODE_MINIMAL
6435 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6436#endif
6437
6438 /** @todo This API may require some improvement later. A private deal with PGM
6439 * regarding locking and unlocking needs to be struck. A couple of TLBs
6440 * living in PGM, with publicly accessible inlined access methods,
6441 * could perhaps be an even better solution. */
6442 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6443 GCPhysMem,
6444 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6445 pIemCpu->fBypassHandlers,
6446 ppvMem,
6447 pLock);
6448 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6449 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6450
6451#ifdef IEM_VERIFICATION_MODE_FULL
6452 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6453 pIemCpu->fProblematicMemory = true;
6454#endif
6455 return rc;
6456}
6457
6458
6459/**
6460 * Unmap a page previously mapped by iemMemPageMap.
6461 *
6462 * @param pIemCpu The IEM per CPU data.
6463 * @param GCPhysMem The physical address.
6464 * @param fAccess The intended access.
6465 * @param pvMem What iemMemPageMap returned.
6466 * @param pLock The PGM lock.
6467 */
6468DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6469{
6470 NOREF(pIemCpu);
6471 NOREF(GCPhysMem);
6472 NOREF(fAccess);
6473 NOREF(pvMem);
6474 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6475}
6476
6477
6478/**
6479 * Looks up a memory mapping entry.
6480 *
6481 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6482 * @param pIemCpu The IEM per CPU data.
6483 * @param pvMem The memory address.
6484 * @param fAccess The access type to match.
6485 */
6486DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6487{
6488 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
6489 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6490 if ( pIemCpu->aMemMappings[0].pv == pvMem
6491 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6492 return 0;
6493 if ( pIemCpu->aMemMappings[1].pv == pvMem
6494 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6495 return 1;
6496 if ( pIemCpu->aMemMappings[2].pv == pvMem
6497 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6498 return 2;
6499 return VERR_NOT_FOUND;
6500}
6501
6502
6503/**
6504 * Finds a free memmap entry when iNextMapping cannot be used.
6505 *
6506 * @returns Memory mapping index, 1024 on failure.
6507 * @param pIemCpu The IEM per CPU data.
6508 */
6509IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6510{
6511 /*
6512 * The easy case.
6513 */
6514 if (pIemCpu->cActiveMappings == 0)
6515 {
6516 pIemCpu->iNextMapping = 1;
6517 return 0;
6518 }
6519
6520 /* There should be enough mappings for all instructions. */
6521 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6522
6523 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6524 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6525 return i;
6526
6527 AssertFailedReturn(1024);
6528}
6529
6530
6531/**
6532 * Commits a bounce buffer that needs writing back and unmaps it.
6533 *
6534 * @returns Strict VBox status code.
6535 * @param pIemCpu The IEM per CPU data.
6536 * @param iMemMap The index of the buffer to commit.
6537 */
6538IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6539{
6540 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6541 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6542
6543 /*
6544 * Do the writing.
6545 */
6546#ifndef IEM_VERIFICATION_MODE_MINIMAL
6547 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6548 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6549 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6550 {
6551 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6552 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6553 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6554 if (!pIemCpu->fBypassHandlers)
6555 {
6556 /*
6557 * Carefully and efficiently dealing with access handler return
6558 * codes makes this a little bloated.
6559 */
6560 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6561 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6562 pbBuf,
6563 cbFirst,
6564 PGMACCESSORIGIN_IEM);
6565 if (rcStrict == VINF_SUCCESS)
6566 {
6567 if (cbSecond)
6568 {
6569 rcStrict = PGMPhysWrite(pVM,
6570 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6571 pbBuf + cbFirst,
6572 cbSecond,
6573 PGMACCESSORIGIN_IEM);
6574 if (rcStrict == VINF_SUCCESS)
6575 { /* nothing */ }
6576 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6577 {
6578 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6579 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6580 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6581 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6582 }
6583 else
6584 {
6585 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6586 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6587 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6588 return rcStrict;
6589 }
6590 }
6591 }
6592 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6593 {
6594 if (!cbSecond)
6595 {
6596 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6597 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6598 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6599 }
6600 else
6601 {
6602 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6603 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6604 pbBuf + cbFirst,
6605 cbSecond,
6606 PGMACCESSORIGIN_IEM);
6607 if (rcStrict2 == VINF_SUCCESS)
6608 {
6609 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6610 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6611 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6612 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6613 }
6614 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6615 {
6616 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6617 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6618 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6619 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6620 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6621 }
6622 else
6623 {
6624 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6625 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6626 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6627 return rcStrict2;
6628 }
6629 }
6630 }
6631 else
6632 {
6633 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6634 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6635 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6636 return rcStrict;
6637 }
6638 }
6639 else
6640 {
6641 /*
6642 * No access handlers, much simpler.
6643 */
6644 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6645 if (RT_SUCCESS(rc))
6646 {
6647 if (cbSecond)
6648 {
6649 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6650 if (RT_SUCCESS(rc))
6651 { /* likely */ }
6652 else
6653 {
6654 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6655 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6656 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6657 return rc;
6658 }
6659 }
6660 }
6661 else
6662 {
6663 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6664 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6665 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6666 return rc;
6667 }
6668 }
6669 }
6670#endif
6671
6672#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6673 /*
6674 * Record the write(s).
6675 */
6676 if (!pIemCpu->fNoRem)
6677 {
6678 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6679 if (pEvtRec)
6680 {
6681 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6682 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6683 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6684 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6685 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6686 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6687 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6688 }
6689 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6690 {
6691 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6692 if (pEvtRec)
6693 {
6694 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6695 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6696 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6697 memcpy(pEvtRec->u.RamWrite.ab,
6698 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6699 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6700 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6701 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6702 }
6703 }
6704 }
6705#endif
6706#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6707 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6708 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6709 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6710 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6711 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6712 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6713
6714 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6715 g_cbIemWrote = cbWrote;
6716 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6717#endif
6718
6719 /*
6720 * Free the mapping entry.
6721 */
6722 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6723 Assert(pIemCpu->cActiveMappings != 0);
6724 pIemCpu->cActiveMappings--;
6725 return VINF_SUCCESS;
6726}
6727
6728
6729/**
6730 * iemMemMap worker that deals with a request crossing pages.
6731 */
6732IEM_STATIC VBOXSTRICTRC
6733iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6734{
6735 /*
6736 * Do the address translations.
6737 */
6738 RTGCPHYS GCPhysFirst;
6739 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6740 if (rcStrict != VINF_SUCCESS)
6741 return rcStrict;
6742
6743 RTGCPHYS GCPhysSecond;
6744 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
6745 fAccess, &GCPhysSecond);
6746 if (rcStrict != VINF_SUCCESS)
6747 return rcStrict;
6748 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6749
6750 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6751#ifdef IEM_VERIFICATION_MODE_FULL
6752 /*
6753 * Detect problematic memory when verifying so we can select
6754 * the right execution engine. (TLB: Redo this.)
6755 */
6756 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6757 {
6758 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6759 if (RT_SUCCESS(rc2))
6760 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6761 if (RT_FAILURE(rc2))
6762 pIemCpu->fProblematicMemory = true;
6763 }
6764#endif
6765
6766
6767 /*
6768 * Read in the current memory content if it's a read, execute or partial
6769 * write access.
6770 */
6771 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6772 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6773 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6774
6775 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6776 {
6777 if (!pIemCpu->fBypassHandlers)
6778 {
6779 /*
6780 * Must carefully deal with access handler status codes here,
6781 * which makes the code a bit bloated.
6782 */
6783 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6784 if (rcStrict == VINF_SUCCESS)
6785 {
6786 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6787 if (rcStrict == VINF_SUCCESS)
6788 { /*likely */ }
6789 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6790 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6791 else
6792 {
6793 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6794 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6795 return rcStrict;
6796 }
6797 }
6798 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6799 {
6800 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6801 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6802 {
6803 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6804 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6805 }
6806 else
6807 {
6808 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6809 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6810 return rcStrict2;
6811 }
6812 }
6813 else
6814 {
6815 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6816 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6817 return rcStrict;
6818 }
6819 }
6820 else
6821 {
6822 /*
6823 * No informational status codes here, much more straightforward.
6824 */
6825 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6826 if (RT_SUCCESS(rc))
6827 {
6828 Assert(rc == VINF_SUCCESS);
6829 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6830 if (RT_SUCCESS(rc))
6831 Assert(rc == VINF_SUCCESS);
6832 else
6833 {
6834 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6835 return rc;
6836 }
6837 }
6838 else
6839 {
6840 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6841 return rc;
6842 }
6843 }
6844
6845#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6846 if ( !pIemCpu->fNoRem
6847 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6848 {
6849 /*
6850 * Record the reads.
6851 */
6852 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6853 if (pEvtRec)
6854 {
6855 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6856 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6857 pEvtRec->u.RamRead.cb = cbFirstPage;
6858 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6859 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6860 }
6861 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6862 if (pEvtRec)
6863 {
6864 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6865 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6866 pEvtRec->u.RamRead.cb = cbSecondPage;
6867 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6868 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6869 }
6870 }
6871#endif
6872 }
6873#ifdef VBOX_STRICT
6874 else
6875 memset(pbBuf, 0xcc, cbMem);
6876 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6877 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6878#endif
6879
6880 /*
6881 * Commit the bounce buffer entry.
6882 */
6883 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6884 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6885 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6886 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6887 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6888 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6889 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6890 pIemCpu->iNextMapping = iMemMap + 1;
6891 pIemCpu->cActiveMappings++;
6892
6893 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6894 *ppvMem = pbBuf;
6895 return VINF_SUCCESS;
6896}
6897
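/*
 * Worked example (added for illustration; not part of the original source):
 * with 4 KiB pages, a 4 byte access whose page offset is 0xffe is split by
 * the function above into cbFirstPage = PAGE_SIZE - 0xffe = 2 bytes from the
 * end of the first page and cbSecondPage = cbMem - cbFirstPage = 2 bytes from
 * the start of the second page, laid out back to back in the bounce buffer.
 */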
6898
6899/**
6900 * iemMemMap worker that deals with iemMemPageMap failures.
6901 */
6902IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6903 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6904{
6905 /*
6906 * Filter out conditions we can handle and the ones which shouldn't happen.
6907 */
6908 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6909 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6910 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6911 {
6912 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6913 return rcMap;
6914 }
6915 pIemCpu->cPotentialExits++;
6916
6917 /*
6918 * Read in the current memory content if it's a read, execute or partial
6919 * write access.
6920 */
6921 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6922 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6923 {
6924 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6925 memset(pbBuf, 0xff, cbMem);
6926 else
6927 {
6928 int rc;
6929 if (!pIemCpu->fBypassHandlers)
6930 {
6931 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6932 if (rcStrict == VINF_SUCCESS)
6933 { /* nothing */ }
6934 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6935 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6936 else
6937 {
6938 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6939 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6940 return rcStrict;
6941 }
6942 }
6943 else
6944 {
6945 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6946 if (RT_SUCCESS(rc))
6947 { /* likely */ }
6948 else
6949 {
6950 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6951 GCPhysFirst, rc));
6952 return rc;
6953 }
6954 }
6955 }
6956
6957#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6958 if ( !pIemCpu->fNoRem
6959 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6960 {
6961 /*
6962 * Record the read.
6963 */
6964 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6965 if (pEvtRec)
6966 {
6967 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6968 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6969 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6970 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6971 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6972 }
6973 }
6974#endif
6975 }
6976#ifdef VBOX_STRICT
6977 else
6978 memset(pbBuf, 0xcc, cbMem);
6981 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6982 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6983#endif
6984
6985 /*
6986 * Commit the bounce buffer entry.
6987 */
6988 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6989 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6990 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6991 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6992 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6993 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6994 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6995 pIemCpu->iNextMapping = iMemMap + 1;
6996 pIemCpu->cActiveMappings++;
6997
6998 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6999 *ppvMem = pbBuf;
7000 return VINF_SUCCESS;
7001}
7002
7003
7004
7005/**
7006 * Maps the specified guest memory for the given kind of access.
7007 *
7008 * This may be using bounce buffering of the memory if it's crossing a page
7009 * boundary or if there is an access handler installed for any of it. Because
7010 * of lock prefix guarantees, we're in for some extra clutter when this
7011 * happens.
7012 *
7013 * This may raise a \#GP, \#SS, \#PF or \#AC.
7014 *
7015 * @returns VBox strict status code.
7016 *
7017 * @param pIemCpu The IEM per CPU data.
7018 * @param ppvMem Where to return the pointer to the mapped
7019 * memory.
7020 * @param cbMem The number of bytes to map. This is usually 1,
7021 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7022 * string operations it can be up to a page.
7023 * @param iSegReg The index of the segment register to use for
7024 * this access. The base and limits are checked.
7025 * Use UINT8_MAX to indicate that no segmentation
7026 * is required (for IDT, GDT and LDT accesses).
7027 * @param GCPtrMem The address of the guest memory.
7028 * @param fAccess How the memory is being accessed. The
7029 * IEM_ACCESS_TYPE_XXX bit is used to figure out
7030 * how to map the memory, while the
7031 * IEM_ACCESS_WHAT_XXX bit is used when raising
7032 * exceptions.
7033 */
7034IEM_STATIC VBOXSTRICTRC
7035iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
7036{
7037 /*
7038 * Check the input and figure out which mapping entry to use.
7039 */
7040 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7041 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
7042 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
7043
7044 unsigned iMemMap = pIemCpu->iNextMapping;
7045 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
7046 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7047 {
7048 iMemMap = iemMemMapFindFree(pIemCpu);
7049 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
7050 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
7051 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
7052 pIemCpu->aMemMappings[2].fAccess),
7053 VERR_IEM_IPE_9);
7054 }
7055
7056 /*
7057 * Map the memory, checking that we can actually access it. If something
7058 * slightly complicated happens, fall back on bounce buffering.
7059 */
7060 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7061 if (rcStrict != VINF_SUCCESS)
7062 return rcStrict;
7063
7064 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
7065 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
7066
7067 RTGCPHYS GCPhysFirst;
7068 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
7069 if (rcStrict != VINF_SUCCESS)
7070 return rcStrict;
7071
7072 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7073 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7074 if (fAccess & IEM_ACCESS_TYPE_READ)
7075 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7076
7077 void *pvMem;
7078 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7079 if (rcStrict != VINF_SUCCESS)
7080 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7081
7082 /*
7083 * Fill in the mapping table entry.
7084 */
7085 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7086 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7087 pIemCpu->iNextMapping = iMemMap + 1;
7088 pIemCpu->cActiveMappings++;
7089
7090 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7091 *ppvMem = pvMem;
7092 return VINF_SUCCESS;
7093}
7094
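/*
 * Usage sketch (added for illustration; not part of the original source):
 * the typical map / access / commit pattern that the data fetch and store
 * helpers further down follow.  The helper name iemExampleStoreByte is made
 * up for this example; any non-VINF_SUCCESS status must be passed up as-is.
 */
#if 0 /* illustrative only */
IEM_STATIC VBOXSTRICTRC iemExampleStoreByte(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t bValue)
{
    uint8_t *pbDst;
    VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pbDst, sizeof(*pbDst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        *pbDst = bValue;    /* direct mapping or bounce buffer, the caller cannot tell */
        rcStrict = iemMemCommitAndUnmap(pIemCpu, pbDst, IEM_ACCESS_DATA_W);
    }
    return rcStrict;
}
#endif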
7095
7096/**
7097 * Commits the guest memory if bounce buffered and unmaps it.
7098 *
7099 * @returns Strict VBox status code.
7100 * @param pIemCpu The IEM per CPU data.
7101 * @param pvMem The mapping.
7102 * @param fAccess The kind of access.
7103 */
7104IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7105{
7106 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7107 AssertReturn(iMemMap >= 0, iMemMap);
7108
7109 /* If it's bounce buffered, we may need to write back the buffer. */
7110 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7111 {
7112 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7113 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7114 }
7115 /* Otherwise unlock it. */
7116 else
7117 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7118
7119 /* Free the entry. */
7120 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7121 Assert(pIemCpu->cActiveMappings != 0);
7122 pIemCpu->cActiveMappings--;
7123 return VINF_SUCCESS;
7124}
7125
7126
7127/**
7128 * Rolls back mappings, releasing page locks and such.
7129 *
7130 * The caller shall only call this after checking cActiveMappings.
7131 *
7133 * @param pIemCpu The IEM per CPU data.
7134 */
7135IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7136{
7137 Assert(pIemCpu->cActiveMappings > 0);
7138
7139 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7140 while (iMemMap-- > 0)
7141 {
7142 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7143 if (fAccess != IEM_ACCESS_INVALID)
7144 {
7145 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7146 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7147 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7148 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7149 Assert(pIemCpu->cActiveMappings > 0);
7150 pIemCpu->cActiveMappings--;
7151 }
7152 }
7153}
7154
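/*
 * Note added for clarity (not in the original source): unlike
 * iemMemCommitAndUnmap above, the rollback never writes bounce buffered data
 * back to guest memory; it only releases the page locks of direct mappings
 * and marks every entry invalid again.
 */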
7155
7156/**
7157 * Fetches a data byte.
7158 *
7159 * @returns Strict VBox status code.
7160 * @param pIemCpu The IEM per CPU data.
7161 * @param pu8Dst Where to return the byte.
7162 * @param iSegReg The index of the segment register to use for
7163 * this access. The base and limits are checked.
7164 * @param GCPtrMem The address of the guest memory.
7165 */
7166IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7167{
7168 /* The lazy approach for now... */
7169 uint8_t const *pu8Src;
7170 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7171 if (rc == VINF_SUCCESS)
7172 {
7173 *pu8Dst = *pu8Src;
7174 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7175 }
7176 return rc;
7177}
7178
7179
7180/**
7181 * Fetches a data word.
7182 *
7183 * @returns Strict VBox status code.
7184 * @param pIemCpu The IEM per CPU data.
7185 * @param pu16Dst Where to return the word.
7186 * @param iSegReg The index of the segment register to use for
7187 * this access. The base and limits are checked.
7188 * @param GCPtrMem The address of the guest memory.
7189 */
7190IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7191{
7192 /* The lazy approach for now... */
7193 uint16_t const *pu16Src;
7194 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7195 if (rc == VINF_SUCCESS)
7196 {
7197 *pu16Dst = *pu16Src;
7198 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7199 }
7200 return rc;
7201}
7202
7203
7204/**
7205 * Fetches a data dword.
7206 *
7207 * @returns Strict VBox status code.
7208 * @param pIemCpu The IEM per CPU data.
7209 * @param pu32Dst Where to return the dword.
7210 * @param iSegReg The index of the segment register to use for
7211 * this access. The base and limits are checked.
7212 * @param GCPtrMem The address of the guest memory.
7213 */
7214IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7215{
7216 /* The lazy approach for now... */
7217 uint32_t const *pu32Src;
7218 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7219 if (rc == VINF_SUCCESS)
7220 {
7221 *pu32Dst = *pu32Src;
7222 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7223 }
7224 return rc;
7225}
7226
7227
7228#ifdef SOME_UNUSED_FUNCTION
7229/**
7230 * Fetches a data dword and sign extends it to a qword.
7231 *
7232 * @returns Strict VBox status code.
7233 * @param pIemCpu The IEM per CPU data.
7234 * @param pu64Dst Where to return the sign extended value.
7235 * @param iSegReg The index of the segment register to use for
7236 * this access. The base and limits are checked.
7237 * @param GCPtrMem The address of the guest memory.
7238 */
7239IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7240{
7241 /* The lazy approach for now... */
7242 int32_t const *pi32Src;
7243 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7244 if (rc == VINF_SUCCESS)
7245 {
7246 *pu64Dst = *pi32Src;
7247 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7248 }
7249#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7250 else
7251 *pu64Dst = 0;
7252#endif
7253 return rc;
7254}
7255#endif
7256
7257
7258/**
7259 * Fetches a data qword.
7260 *
7261 * @returns Strict VBox status code.
7262 * @param pIemCpu The IEM per CPU data.
7263 * @param pu64Dst Where to return the qword.
7264 * @param iSegReg The index of the segment register to use for
7265 * this access. The base and limits are checked.
7266 * @param GCPtrMem The address of the guest memory.
7267 */
7268IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7269{
7270 /* The lazy approach for now... */
7271 uint64_t const *pu64Src;
7272 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7273 if (rc == VINF_SUCCESS)
7274 {
7275 *pu64Dst = *pu64Src;
7276 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7277 }
7278 return rc;
7279}
7280
7281
7282/**
7283 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7284 *
7285 * @returns Strict VBox status code.
7286 * @param pIemCpu The IEM per CPU data.
7287 * @param pu64Dst Where to return the qword.
7288 * @param iSegReg The index of the segment register to use for
7289 * this access. The base and limits are checked.
7290 * @param GCPtrMem The address of the guest memory.
7291 */
7292IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7293{
7294 /* The lazy approach for now... */
7295 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7296 if (RT_UNLIKELY(GCPtrMem & 15))
7297 return iemRaiseGeneralProtectionFault0(pIemCpu);
7298
7299 uint64_t const *pu64Src;
7300 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7301 if (rc == VINF_SUCCESS)
7302 {
7303 *pu64Dst = *pu64Src;
7304 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7305 }
7306 return rc;
7307}
7308
7309
7310/**
7311 * Fetches a data tword.
7312 *
7313 * @returns Strict VBox status code.
7314 * @param pIemCpu The IEM per CPU data.
7315 * @param pr80Dst Where to return the tword.
7316 * @param iSegReg The index of the segment register to use for
7317 * this access. The base and limits are checked.
7318 * @param GCPtrMem The address of the guest memory.
7319 */
7320IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7321{
7322 /* The lazy approach for now... */
7323 PCRTFLOAT80U pr80Src;
7324 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7325 if (rc == VINF_SUCCESS)
7326 {
7327 *pr80Dst = *pr80Src;
7328 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7329 }
7330 return rc;
7331}
7332
7333
7334/**
7335 * Fetches a data dqword (double qword), generally SSE related.
7336 *
7337 * @returns Strict VBox status code.
7338 * @param pIemCpu The IEM per CPU data.
7339 * @param pu128Dst Where to return the dqword.
7340 * @param iSegReg The index of the segment register to use for
7341 * this access. The base and limits are checked.
7342 * @param GCPtrMem The address of the guest memory.
7343 */
7344IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7345{
7346 /* The lazy approach for now... */
7347 uint128_t const *pu128Src;
7348 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7349 if (rc == VINF_SUCCESS)
7350 {
7351 *pu128Dst = *pu128Src;
7352 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7353 }
7354 return rc;
7355}
7356
7357
7358/**
7359 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7360 * related.
7361 *
7362 * Raises \#GP(0) if not aligned.
7363 *
7364 * @returns Strict VBox status code.
7365 * @param pIemCpu The IEM per CPU data.
7366 * @param pu128Dst Where to return the dqword.
7367 * @param iSegReg The index of the segment register to use for
7368 * this access. The base and limits are checked.
7369 * @param GCPtrMem The address of the guest memory.
7370 */
7371IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7372{
7373 /* The lazy approach for now... */
7374 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7375 if ( (GCPtrMem & 15)
7376 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7377 return iemRaiseGeneralProtectionFault0(pIemCpu);
7378
7379 uint128_t const *pu128Src;
7380 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7381 if (rc == VINF_SUCCESS)
7382 {
7383 *pu128Dst = *pu128Src;
7384 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7385 }
7386 return rc;
7387}
7388
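/*
 * Example (added for illustration; not part of the original source): for
 * GCPtrMem = 0x1008 the check above sees (0x1008 & 15) = 8, so unless
 * MXCSR.MM is set the access raises #GP(0) instead of being mapped.  Note
 * that, as written, the test is done on GCPtrMem before the segment base is
 * applied (see the todo note above).
 */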
7389
7390
7391
7392/**
7393 * Fetches a descriptor register (lgdt, lidt).
7394 *
7395 * @returns Strict VBox status code.
7396 * @param pIemCpu The IEM per CPU data.
7397 * @param pcbLimit Where to return the limit.
7398 * @param pGCPtrBase Where to return the base.
7399 * @param iSegReg The index of the segment register to use for
7400 * this access. The base and limits are checked.
7401 * @param GCPtrMem The address of the guest memory.
7402 * @param enmOpSize The effective operand size.
7403 */
7404IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7405 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7406{
7407 /*
7408 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7409 * little special:
7410 * - The two reads are done separately.
7411 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
7412 * - We suspect the 386 to actually commit the limit before the base in
7413 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7414 * don't try to emulate this eccentric behavior, because it's not well
7415 * enough understood and rather hard to trigger.
7416 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7417 */
7418 VBOXSTRICTRC rcStrict;
7419 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
7420 {
7421 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7422 if (rcStrict == VINF_SUCCESS)
7423 rcStrict = iemMemFetchDataU64(pIemCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7424 }
7425 else
7426 {
7427 uint32_t uTmp;
7428 if (enmOpSize == IEMMODE_32BIT)
7429 {
7430 if (IEM_GET_TARGET_CPU(pIemCpu) != IEMTARGETCPU_486)
7431 {
7432 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7433 if (rcStrict == VINF_SUCCESS)
7434 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7435 }
7436 else
7437 {
7438 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem);
7439 if (rcStrict == VINF_SUCCESS)
7440 {
7441 *pcbLimit = (uint16_t)uTmp;
7442 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7443 }
7444 }
7445 if (rcStrict == VINF_SUCCESS)
7446 *pGCPtrBase = uTmp;
7447 }
7448 else
7449 {
7450 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7451 if (rcStrict == VINF_SUCCESS)
7452 {
7453 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7454 if (rcStrict == VINF_SUCCESS)
7455 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7456 }
7457 }
7458 }
7459 return rcStrict;
7460}
7461
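/*
 * Layout of the memory operand read above (added for illustration; not part
 * of the original source):
 *   16-bit and 32-bit mode:  +0  16-bit limit,  +2  32-bit base
 *                            (only the low 24 bits of the base are kept when
 *                            the operand size is 16-bit).
 *   64-bit mode:             +0  16-bit limit,  +2  64-bit base.
 */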
7462
7463
7464/**
7465 * Stores a data byte.
7466 *
7467 * @returns Strict VBox status code.
7468 * @param pIemCpu The IEM per CPU data.
7469 * @param iSegReg The index of the segment register to use for
7470 * this access. The base and limits are checked.
7471 * @param GCPtrMem The address of the guest memory.
7472 * @param u8Value The value to store.
7473 */
7474IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7475{
7476 /* The lazy approach for now... */
7477 uint8_t *pu8Dst;
7478 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7479 if (rc == VINF_SUCCESS)
7480 {
7481 *pu8Dst = u8Value;
7482 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7483 }
7484 return rc;
7485}
7486
7487
7488/**
7489 * Stores a data word.
7490 *
7491 * @returns Strict VBox status code.
7492 * @param pIemCpu The IEM per CPU data.
7493 * @param iSegReg The index of the segment register to use for
7494 * this access. The base and limits are checked.
7495 * @param GCPtrMem The address of the guest memory.
7496 * @param u16Value The value to store.
7497 */
7498IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7499{
7500 /* The lazy approach for now... */
7501 uint16_t *pu16Dst;
7502 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7503 if (rc == VINF_SUCCESS)
7504 {
7505 *pu16Dst = u16Value;
7506 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7507 }
7508 return rc;
7509}
7510
7511
7512/**
7513 * Stores a data dword.
7514 *
7515 * @returns Strict VBox status code.
7516 * @param pIemCpu The IEM per CPU data.
7517 * @param iSegReg The index of the segment register to use for
7518 * this access. The base and limits are checked.
7519 * @param GCPtrMem The address of the guest memory.
7520 * @param u32Value The value to store.
7521 */
7522IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7523{
7524 /* The lazy approach for now... */
7525 uint32_t *pu32Dst;
7526 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7527 if (rc == VINF_SUCCESS)
7528 {
7529 *pu32Dst = u32Value;
7530 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7531 }
7532 return rc;
7533}
7534
7535
7536/**
7537 * Stores a data qword.
7538 *
7539 * @returns Strict VBox status code.
7540 * @param pIemCpu The IEM per CPU data.
7541 * @param iSegReg The index of the segment register to use for
7542 * this access. The base and limits are checked.
7543 * @param GCPtrMem The address of the guest memory.
7544 * @param u64Value The value to store.
7545 */
7546IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7547{
7548 /* The lazy approach for now... */
7549 uint64_t *pu64Dst;
7550 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7551 if (rc == VINF_SUCCESS)
7552 {
7553 *pu64Dst = u64Value;
7554 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7555 }
7556 return rc;
7557}
7558
7559
7560/**
7561 * Stores a data dqword.
7562 *
7563 * @returns Strict VBox status code.
7564 * @param pIemCpu The IEM per CPU data.
7565 * @param iSegReg The index of the segment register to use for
7566 * this access. The base and limits are checked.
7567 * @param GCPtrMem The address of the guest memory.
7568 * @param u128Value The value to store.
7569 */
7570IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7571{
7572 /* The lazy approach for now... */
7573 uint128_t *pu128Dst;
7574 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7575 if (rc == VINF_SUCCESS)
7576 {
7577 *pu128Dst = u128Value;
7578 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7579 }
7580 return rc;
7581}
7582
7583
7584/**
7585 * Stores a data dqword, SSE aligned.
7586 *
7587 * @returns Strict VBox status code.
7588 * @param pIemCpu The IEM per CPU data.
7589 * @param iSegReg The index of the segment register to use for
7590 * this access. The base and limits are checked.
7591 * @param GCPtrMem The address of the guest memory.
7592 * @param u128Value The value to store.
7593 */
7594IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7595{
7596 /* The lazy approach for now... */
7597 if ( (GCPtrMem & 15)
7598 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7599 return iemRaiseGeneralProtectionFault0(pIemCpu);
7600
7601 uint128_t *pu128Dst;
7602 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7603 if (rc == VINF_SUCCESS)
7604 {
7605 *pu128Dst = u128Value;
7606 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7607 }
7608 return rc;
7609}
7610
7611
7612/**
7613 * Stores a descriptor register (sgdt, sidt).
7614 *
7615 * @returns Strict VBox status code.
7616 * @param pIemCpu The IEM per CPU data.
7617 * @param cbLimit The limit.
7618 * @param GCPtrBase The base address.
7619 * @param iSegReg The index of the segment register to use for
7620 * this access. The base and limits are checked.
7621 * @param GCPtrMem The address of the guest memory.
7622 */
7623IEM_STATIC VBOXSTRICTRC
7624iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
7625{
7626 /*
7627 * The SIDT and SGDT instructions actually store the data using two
7628 * independent writes. The instructions do not respond to opsize prefixes.
7629 */
7630 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pIemCpu, iSegReg, GCPtrMem, cbLimit);
7631 if (rcStrict == VINF_SUCCESS)
7632 {
7633 if (pIemCpu->enmCpuMode == IEMMODE_16BIT)
7634 rcStrict = iemMemStoreDataU32(pIemCpu, iSegReg, GCPtrMem + 2,
7635 IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_286
7636 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7637 else if (pIemCpu->enmCpuMode == IEMMODE_32BIT)
7638 rcStrict = iemMemStoreDataU32(pIemCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7639 else
7640 rcStrict = iemMemStoreDataU64(pIemCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7641 }
7642 return rcStrict;
7643}
7644
7645
7646/**
7647 * Pushes a word onto the stack.
7648 *
7649 * @returns Strict VBox status code.
7650 * @param pIemCpu The IEM per CPU data.
7651 * @param u16Value The value to push.
7652 */
7653IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7654{
7655 /* Decrement the stack pointer. */
7656 uint64_t uNewRsp;
7657 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7658 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7659
7660 /* Write the word the lazy way. */
7661 uint16_t *pu16Dst;
7662 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7663 if (rc == VINF_SUCCESS)
7664 {
7665 *pu16Dst = u16Value;
7666 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7667 }
7668
7669 /* Commit the new RSP value unless an access handler made trouble. */
7670 if (rc == VINF_SUCCESS)
7671 pCtx->rsp = uNewRsp;
7672
7673 return rc;
7674}
7675
7676
7677/**
7678 * Pushes a dword onto the stack.
7679 *
7680 * @returns Strict VBox status code.
7681 * @param pIemCpu The IEM per CPU data.
7682 * @param u32Value The value to push.
7683 */
7684IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7685{
7686 /* Decrement the stack pointer. */
7687 uint64_t uNewRsp;
7688 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7689 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7690
7691 /* Write the dword the lazy way. */
7692 uint32_t *pu32Dst;
7693 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7694 if (rc == VINF_SUCCESS)
7695 {
7696 *pu32Dst = u32Value;
7697 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7698 }
7699
7700 /* Commit the new RSP value unless an access handler made trouble. */
7701 if (rc == VINF_SUCCESS)
7702 pCtx->rsp = uNewRsp;
7703
7704 return rc;
7705}
7706
7707
7708/**
7709 * Pushes a dword segment register value onto the stack.
7710 *
7711 * @returns Strict VBox status code.
7712 * @param pIemCpu The IEM per CPU data.
7713 * @param u32Value The value to push.
7714 */
7715IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7716{
7717 /* Decrement the stack pointer. */
7718 uint64_t uNewRsp;
7719 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7720 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7721
7722 VBOXSTRICTRC rc;
7723 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7724 {
7725 /* The recompiler writes a full dword. */
7726 uint32_t *pu32Dst;
7727 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7728 if (rc == VINF_SUCCESS)
7729 {
7730 *pu32Dst = u32Value;
7731 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7732 }
7733 }
7734 else
7735 {
7736 /* The Intel docs talk about zero extending the selector register
7737 value. My actual Intel CPU here might be zero extending the value,
7738 but it still only writes the lower word... */
7739 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7740 * happens when crossing an electric page boundary: is the high word checked
7741 * for write accessibility or not? Probably it is. What about segment limits?
7742 * It appears this behavior is also shared with trap error codes.
7743 *
7744 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
7745 * ancient hardware when it actually did change. */
7746 uint16_t *pu16Dst;
7747 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7748 if (rc == VINF_SUCCESS)
7749 {
7750 *pu16Dst = (uint16_t)u32Value;
7751 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7752 }
7753 }
7754
7755 /* Commit the new RSP value unless an access handler made trouble. */
7756 if (rc == VINF_SUCCESS)
7757 pCtx->rsp = uNewRsp;
7758
7759 return rc;
7760}
7761
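/*
 * Note added for clarity (not in the original source): in the else branch
 * above (the normal, non-verification case) a full dword at the new stack top
 * is mapped read/write, so all four bytes get their access checked, but only
 * the low word receives the selector value and the high word keeps whatever
 * the stack already held.
 */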
7762
7763/**
7764 * Pushes a qword onto the stack.
7765 *
7766 * @returns Strict VBox status code.
7767 * @param pIemCpu The IEM per CPU data.
7768 * @param u64Value The value to push.
7769 */
7770IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7771{
7772 /* Decrement the stack pointer. */
7773 uint64_t uNewRsp;
7774 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7775 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7776
7777 /* Write the qword the lazy way. */
7778 uint64_t *pu64Dst;
7779 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7780 if (rc == VINF_SUCCESS)
7781 {
7782 *pu64Dst = u64Value;
7783 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7784 }
7785
7786 /* Commit the new RSP value unless an access handler made trouble. */
7787 if (rc == VINF_SUCCESS)
7788 pCtx->rsp = uNewRsp;
7789
7790 return rc;
7791}
7792
7793
7794/**
7795 * Pops a word from the stack.
7796 *
7797 * @returns Strict VBox status code.
7798 * @param pIemCpu The IEM per CPU data.
7799 * @param pu16Value Where to store the popped value.
7800 */
7801IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7802{
7803 /* Increment the stack pointer. */
7804 uint64_t uNewRsp;
7805 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7806 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7807
7808 /* Fetch the word the lazy way. */
7809 uint16_t const *pu16Src;
7810 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7811 if (rc == VINF_SUCCESS)
7812 {
7813 *pu16Value = *pu16Src;
7814 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7815
7816 /* Commit the new RSP value. */
7817 if (rc == VINF_SUCCESS)
7818 pCtx->rsp = uNewRsp;
7819 }
7820
7821 return rc;
7822}
7823
7824
7825/**
7826 * Pops a dword from the stack.
7827 *
7828 * @returns Strict VBox status code.
7829 * @param pIemCpu The IEM per CPU data.
7830 * @param pu32Value Where to store the popped value.
7831 */
7832IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7833{
7834 /* Increment the stack pointer. */
7835 uint64_t uNewRsp;
7836 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7837 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7838
7839 /* Fetch the dword the lazy way. */
7840 uint32_t const *pu32Src;
7841 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7842 if (rc == VINF_SUCCESS)
7843 {
7844 *pu32Value = *pu32Src;
7845 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7846
7847 /* Commit the new RSP value. */
7848 if (rc == VINF_SUCCESS)
7849 pCtx->rsp = uNewRsp;
7850 }
7851
7852 return rc;
7853}
7854
7855
7856/**
7857 * Pops a qword from the stack.
7858 *
7859 * @returns Strict VBox status code.
7860 * @param pIemCpu The IEM per CPU data.
7861 * @param pu64Value Where to store the popped value.
7862 */
7863IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7864{
7865 /* Increment the stack pointer. */
7866 uint64_t uNewRsp;
7867 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7868 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7869
7870 /* Fetch the qword the lazy way. */
7871 uint64_t const *pu64Src;
7872 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7873 if (rc == VINF_SUCCESS)
7874 {
7875 *pu64Value = *pu64Src;
7876 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7877
7878 /* Commit the new RSP value. */
7879 if (rc == VINF_SUCCESS)
7880 pCtx->rsp = uNewRsp;
7881 }
7882
7883 return rc;
7884}
7885
7886
7887/**
7888 * Pushes a word onto the stack, using a temporary stack pointer.
7889 *
7890 * @returns Strict VBox status code.
7891 * @param pIemCpu The IEM per CPU data.
7892 * @param u16Value The value to push.
7893 * @param pTmpRsp Pointer to the temporary stack pointer.
7894 */
7895IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7896{
7897 /* Decrement the stack pointer. */
7898 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7899 RTUINT64U NewRsp = *pTmpRsp;
7900 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7901
7902 /* Write the word the lazy way. */
7903 uint16_t *pu16Dst;
7904 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7905 if (rc == VINF_SUCCESS)
7906 {
7907 *pu16Dst = u16Value;
7908 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7909 }
7910
7911 /* Commit the new RSP value unless an access handler made trouble. */
7912 if (rc == VINF_SUCCESS)
7913 *pTmpRsp = NewRsp;
7914
7915 return rc;
7916}
7917
7918
7919/**
7920 * Pushes a dword onto the stack, using a temporary stack pointer.
7921 *
7922 * @returns Strict VBox status code.
7923 * @param pIemCpu The IEM per CPU data.
7924 * @param u32Value The value to push.
7925 * @param pTmpRsp Pointer to the temporary stack pointer.
7926 */
7927IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7928{
7929 /* Decrement the stack pointer. */
7930 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7931 RTUINT64U NewRsp = *pTmpRsp;
7932 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7933
7934 /* Write the dword the lazy way. */
7935 uint32_t *pu32Dst;
7936 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7937 if (rc == VINF_SUCCESS)
7938 {
7939 *pu32Dst = u32Value;
7940 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7941 }
7942
7943 /* Commit the new RSP value unless an access handler made trouble. */
7944 if (rc == VINF_SUCCESS)
7945 *pTmpRsp = NewRsp;
7946
7947 return rc;
7948}
7949
7950
7951/**
7952 * Pushes a qword onto the stack, using a temporary stack pointer.
7953 *
7954 * @returns Strict VBox status code.
7955 * @param pIemCpu The IEM per CPU data.
7956 * @param u64Value The value to push.
7957 * @param pTmpRsp Pointer to the temporary stack pointer.
7958 */
7959IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7960{
7961 /* Decrement the stack pointer. */
7962 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7963 RTUINT64U NewRsp = *pTmpRsp;
7964 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7965
7966 /* Write the qword the lazy way. */
7967 uint64_t *pu64Dst;
7968 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7969 if (rc == VINF_SUCCESS)
7970 {
7971 *pu64Dst = u64Value;
7972 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7973 }
7974
7975 /* Commit the new RSP value unless an access handler made trouble. */
7976 if (rc == VINF_SUCCESS)
7977 *pTmpRsp = NewRsp;
7978
7979 return rc;
7980}
7981
7982
7983/**
7984 * Pops a word from the stack, using a temporary stack pointer.
7985 *
7986 * @returns Strict VBox status code.
7987 * @param pIemCpu The IEM per CPU data.
7988 * @param pu16Value Where to store the popped value.
7989 * @param pTmpRsp Pointer to the temporary stack pointer.
7990 */
7991IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7992{
7993 /* Increment the stack pointer. */
7994 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7995 RTUINT64U NewRsp = *pTmpRsp;
7996 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7997
7998 /* Fetch the word the lazy way. */
7999 uint16_t const *pu16Src;
8000 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8001 if (rc == VINF_SUCCESS)
8002 {
8003 *pu16Value = *pu16Src;
8004 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8005
8006 /* Commit the new RSP value. */
8007 if (rc == VINF_SUCCESS)
8008 *pTmpRsp = NewRsp;
8009 }
8010
8011 return rc;
8012}
8013
8014
8015/**
8016 * Pops a dword from the stack, using a temporary stack pointer.
8017 *
8018 * @returns Strict VBox status code.
8019 * @param pIemCpu The IEM per CPU data.
8020 * @param pu32Value Where to store the popped value.
8021 * @param pTmpRsp Pointer to the temporary stack pointer.
8022 */
8023IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
8024{
8025 /* Increment the stack pointer. */
8026 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8027 RTUINT64U NewRsp = *pTmpRsp;
8028 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
8029
8030 /* Read the dword the lazy way. */
8031 uint32_t const *pu32Src;
8032 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8033 if (rc == VINF_SUCCESS)
8034 {
8035 *pu32Value = *pu32Src;
8036 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8037
8038 /* Commit the new RSP value. */
8039 if (rc == VINF_SUCCESS)
8040 *pTmpRsp = NewRsp;
8041 }
8042
8043 return rc;
8044}
8045
8046
8047/**
8048 * Pops a qword from the stack, using a temporary stack pointer.
8049 *
8050 * @returns Strict VBox status code.
8051 * @param pIemCpu The IEM per CPU data.
8052 * @param pu64Value Where to store the popped value.
8053 * @param pTmpRsp Pointer to the temporary stack pointer.
8054 */
8055IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
8056{
8057 /* Increment the stack pointer. */
8058 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8059 RTUINT64U NewRsp = *pTmpRsp;
8060 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8061
8062 /* Read the qword the lazy way. */
8063 uint64_t const *pu64Src;
8064 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8065 if (rcStrict == VINF_SUCCESS)
8066 {
8067 *pu64Value = *pu64Src;
8068 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8069
8070 /* Commit the new RSP value. */
8071 if (rcStrict == VINF_SUCCESS)
8072 *pTmpRsp = NewRsp;
8073 }
8074
8075 return rcStrict;
8076}
8077
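/*
 * Minimal usage sketch for the *Ex push/pop variants above, assuming a caller
 * that pushes two dwords: the caller works on its own RTUINT64U copy of RSP
 * and commits it to the CPU context only after every part has succeeded.
 * uFirstValue and uSecondValue are placeholders.
 *
 *     RTUINT64U TmpRsp;
 *     TmpRsp.u = pCtx->rsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushU32Ex(pIemCpu, uFirstValue, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = iemMemStackPushU32Ex(pIemCpu, uSecondValue, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         pCtx->rsp = TmpRsp.u;
 */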
8078
8079/**
8080 * Begins a special stack push (used by interrupts, exceptions and such).
8081 *
8082 * This will raise \#SS or \#PF if appropriate.
8083 *
8084 * @returns Strict VBox status code.
8085 * @param pIemCpu The IEM per CPU data.
8086 * @param cbMem The number of bytes to push onto the stack.
8087 * @param ppvMem Where to return the pointer to the stack memory.
8088 * As with the other memory functions this could be
8089 * direct access or bounce buffered access, so
8090 * don't commit the register until the commit call
8091 * succeeds.
8092 * @param puNewRsp Where to return the new RSP value. This must be
8093 * passed unchanged to
8094 * iemMemStackPushCommitSpecial().
8095 */
8096IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8097{
8098 Assert(cbMem < UINT8_MAX);
8099 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8100 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8101 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8102}
8103
8104
8105/**
8106 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8107 *
8108 * This will update the rSP.
8109 *
8110 * @returns Strict VBox status code.
8111 * @param pIemCpu The IEM per CPU data.
8112 * @param pvMem The pointer returned by
8113 * iemMemStackPushBeginSpecial().
8114 * @param uNewRsp The new RSP value returned by
8115 * iemMemStackPushBeginSpecial().
8116 */
8117IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8118{
8119 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8120 if (rcStrict == VINF_SUCCESS)
8121 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8122 return rcStrict;
8123}
8124
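/*
 * Minimal usage sketch for the special push pair above, assuming an exception
 * dispatcher that builds a three-qword frame; uRip, uCs and uFlags are
 * placeholders.  Nothing else should be committed until
 * iemMemStackPushCommitSpecial() has succeeded.
 *
 *     uint64_t     uNewRsp;
 *     uint64_t    *pu64Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 24, (void **)&pu64Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pu64Frame[0] = uRip;
 *     pu64Frame[1] = uCs;
 *     pu64Frame[2] = uFlags;
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu64Frame, uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */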
8125
8126/**
8127 * Begins a special stack pop (used by iret, retf and such).
8128 *
8129 * This will raise \#SS or \#PF if appropriate.
8130 *
8131 * @returns Strict VBox status code.
8132 * @param pIemCpu The IEM per CPU data.
8133 * @param cbMem The number of bytes to pop from the stack.
8134 * @param ppvMem Where to return the pointer to the stack memory.
8135 * @param puNewRsp Where to return the new RSP value. This must be
8136 * passed unchanged to
8137 * iemMemStackPopCommitSpecial() or applied
8138 * manually if iemMemStackPopDoneSpecial() is used.
8139 */
8140IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8141{
8142 Assert(cbMem < UINT8_MAX);
8143 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8144 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8145 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8146}
8147
8148
8149/**
8150 * Continues a special stack pop (used by iret and retf).
8151 *
8152 * This will raise \#SS or \#PF if appropriate.
8153 *
8154 * @returns Strict VBox status code.
8155 * @param pIemCpu The IEM per CPU data.
8156 * @param cbMem The number of bytes to pop from the stack.
8157 * @param ppvMem Where to return the pointer to the stack memory.
8158 * @param puNewRsp Where to return the new RSP value. This must be
8159 * passed unchanged to
8160 * iemMemStackPopCommitSpecial() or applied
8161 * manually if iemMemStackPopDoneSpecial() is used.
8162 */
8163IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8164{
8165 Assert(cbMem < UINT8_MAX);
8166 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8167 RTUINT64U NewRsp;
8168 NewRsp.u = *puNewRsp;
8169 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8170 *puNewRsp = NewRsp.u;
8171 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8172}
8173
8174
8175/**
8176 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8177 *
8178 * This will update the rSP.
8179 *
8180 * @returns Strict VBox status code.
8181 * @param pIemCpu The IEM per CPU data.
8182 * @param pvMem The pointer returned by
8183 * iemMemStackPopBeginSpecial().
8184 * @param uNewRsp The new RSP value returned by
8185 * iemMemStackPopBeginSpecial().
8186 */
8187IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8188{
8189 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8190 if (rcStrict == VINF_SUCCESS)
8191 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8192 return rcStrict;
8193}
8194
8195
8196/**
8197 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8198 * iemMemStackPopContinueSpecial).
8199 *
8200 * The caller will manually commit the rSP.
8201 *
8202 * @returns Strict VBox status code.
8203 * @param pIemCpu The IEM per CPU data.
8204 * @param pvMem The pointer returned by
8205 * iemMemStackPopBeginSpecial() or
8206 * iemMemStackPopContinueSpecial().
8207 */
8208IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8209{
8210 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8211}
8212
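/*
 * Minimal usage sketch for the special pop family above, assuming an
 * iret-style caller that reads a three-qword frame, validates it, and only
 * then updates RSP by hand (as iemMemStackPopDoneSpecial() requires):
 *
 *     uint64_t        uNewRsp;
 *     uint64_t const *pu64Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 24, (void const **)&pu64Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ... validate pu64Frame[0], pu64Frame[1] and pu64Frame[2] ...
 *     rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu64Frame);
 *     if (rcStrict == VINF_SUCCESS)
 *         pCtx->rsp = uNewRsp;
 */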
8213
8214/**
8215 * Fetches a system table byte.
8216 *
8217 * @returns Strict VBox status code.
8218 * @param pIemCpu The IEM per CPU data.
8219 * @param pbDst Where to return the byte.
8220 * @param iSegReg The index of the segment register to use for
8221 * this access. The base and limits are checked.
8222 * @param GCPtrMem The address of the guest memory.
8223 */
8224IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8225{
8226 /* The lazy approach for now... */
8227 uint8_t const *pbSrc;
8228 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8229 if (rc == VINF_SUCCESS)
8230 {
8231 *pbDst = *pbSrc;
8232 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8233 }
8234 return rc;
8235}
8236
8237
8238/**
8239 * Fetches a system table word.
8240 *
8241 * @returns Strict VBox status code.
8242 * @param pIemCpu The IEM per CPU data.
8243 * @param pu16Dst Where to return the word.
8244 * @param iSegReg The index of the segment register to use for
8245 * this access. The base and limits are checked.
8246 * @param GCPtrMem The address of the guest memory.
8247 */
8248IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8249{
8250 /* The lazy approach for now... */
8251 uint16_t const *pu16Src;
8252 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8253 if (rc == VINF_SUCCESS)
8254 {
8255 *pu16Dst = *pu16Src;
8256 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8257 }
8258 return rc;
8259}
8260
8261
8262/**
8263 * Fetches a system table dword.
8264 *
8265 * @returns Strict VBox status code.
8266 * @param pIemCpu The IEM per CPU data.
8267 * @param pu32Dst Where to return the dword.
8268 * @param iSegReg The index of the segment register to use for
8269 * this access. The base and limits are checked.
8270 * @param GCPtrMem The address of the guest memory.
8271 */
8272IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8273{
8274 /* The lazy approach for now... */
8275 uint32_t const *pu32Src;
8276 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8277 if (rc == VINF_SUCCESS)
8278 {
8279 *pu32Dst = *pu32Src;
8280 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8281 }
8282 return rc;
8283}
8284
8285
8286/**
8287 * Fetches a system table qword.
8288 *
8289 * @returns Strict VBox status code.
8290 * @param pIemCpu The IEM per CPU data.
8291 * @param pu64Dst Where to return the qword.
8292 * @param iSegReg The index of the segment register to use for
8293 * this access. The base and limits are checked.
8294 * @param GCPtrMem The address of the guest memory.
8295 */
8296IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8297{
8298 /* The lazy approach for now... */
8299 uint64_t const *pu64Src;
8300 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8301 if (rc == VINF_SUCCESS)
8302 {
8303 *pu64Dst = *pu64Src;
8304 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8305 }
8306 return rc;
8307}
8308
8309
8310/**
8311 * Fetches a descriptor table entry with caller specified error code.
8312 *
8313 * @returns Strict VBox status code.
8314 * @param pIemCpu The IEM per CPU.
8315 * @param pDesc Where to return the descriptor table entry.
8316 * @param uSel The selector which table entry to fetch.
8317 * @param uXcpt The exception to raise on table lookup error.
8318 * @param uErrorCode The error code associated with the exception.
8319 */
8320IEM_STATIC VBOXSTRICTRC
8321iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8322{
8323 AssertPtr(pDesc);
8324 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8325
8326 /** @todo did the 286 require all 8 bytes to be accessible? */
8327 /*
8328 * Get the selector table base and check bounds.
8329 */
8330 RTGCPTR GCPtrBase;
8331 if (uSel & X86_SEL_LDT)
8332 {
8333 if ( !pCtx->ldtr.Attr.n.u1Present
8334 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8335 {
8336 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8337 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8338 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8339 uErrorCode, 0);
8340 }
8341
8342 Assert(pCtx->ldtr.Attr.n.u1Present);
8343 GCPtrBase = pCtx->ldtr.u64Base;
8344 }
8345 else
8346 {
8347 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8348 {
8349 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8350 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8351 uErrorCode, 0);
8352 }
8353 GCPtrBase = pCtx->gdtr.pGdt;
8354 }
8355
8356 /*
8357 * Read the legacy descriptor and maybe the long mode extensions if
8358 * required.
8359 */
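/*
 * Note on the address arithmetic below: the legacy half is read from
 * GCPtrBase + (uSel & X86_SEL_MASK), i.e. the selector with the TI and RPL
 * bits masked off, while the upper half of a 16-byte long mode system
 * descriptor is read from (uSel | X86_SEL_RPL_LDT) + 1, which simply rounds
 * the selector up to the next multiple of 8.  For uSel = 0x10 that gives the
 * offsets 0x10 and 0x18, and the bounds check requires offset 0x1f (the last
 * byte of the upper half) to be within the table limit.
 */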
8360 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8361 if (rcStrict == VINF_SUCCESS)
8362 {
8363 if ( !IEM_IS_LONG_MODE(pIemCpu)
8364 || pDesc->Legacy.Gen.u1DescType)
8365 pDesc->Long.au64[1] = 0;
8366 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8367 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8368 else
8369 {
8370 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8371 /** @todo is this the right exception? */
8372 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8373 }
8374 }
8375 return rcStrict;
8376}
8377
8378
8379/**
8380 * Fetches a descriptor table entry.
8381 *
8382 * @returns Strict VBox status code.
8383 * @param pIemCpu The IEM per CPU.
8384 * @param pDesc Where to return the descriptor table entry.
8385 * @param uSel The selector which table entry to fetch.
8386 * @param uXcpt The exception to raise on table lookup error.
8387 */
8388IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8389{
8390 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8391}
8392
8393
8394/**
8395 * Fakes a long mode stack selector for SS = 0.
8396 *
8397 * @param pDescSs Where to return the fake stack descriptor.
8398 * @param uDpl The DPL we want.
8399 */
8400IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8401{
8402 pDescSs->Long.au64[0] = 0;
8403 pDescSs->Long.au64[1] = 0;
8404 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8405 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8406 pDescSs->Long.Gen.u2Dpl = uDpl;
8407 pDescSs->Long.Gen.u1Present = 1;
8408 pDescSs->Long.Gen.u1Long = 1;
8409}
8410
8411
8412/**
8413 * Marks the selector descriptor as accessed (only non-system descriptors).
8414 *
8415 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8416 * will therefore skip the limit checks.
8417 *
8418 * @returns Strict VBox status code.
8419 * @param pIemCpu The IEM per CPU.
8420 * @param uSel The selector.
8421 */
8422IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8423{
8424 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8425
8426 /*
8427 * Get the selector table base and calculate the entry address.
8428 */
8429 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8430 ? pCtx->ldtr.u64Base
8431 : pCtx->gdtr.pGdt;
8432 GCPtr += uSel & X86_SEL_MASK;
8433
8434 /*
8435 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8436 * ugly stuff to avoid this. This will make sure the access is atomic and
8437 * will more or less remove any question about 8-bit or 32-bit accesses.
8438 */
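/*
 * (The accessed bit is bit 40 of the 8-byte descriptor, i.e. bit 0 of the
 * type byte at offset 5.  In the aligned case below we map the dword at
 * offset 4 and set bit 8 = 40 - 32.  In the misaligned case we map all 8
 * bytes and adjust the byte pointer so that the dword ASMAtomicBitSet works
 * on is aligned again, compensating in the bit index: e.g. a pointer
 * misaligned by 1 is advanced by 3 bytes and the bit index becomes
 * 40 - 24 = 16.)
 */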
8439 VBOXSTRICTRC rcStrict;
8440 uint32_t volatile *pu32;
8441 if ((GCPtr & 3) == 0)
8442 {
8443 /* The normal case, map the 32 bits containing the accessed bit (bit 40). */
8444 GCPtr += 2 + 2;
8445 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8446 if (rcStrict != VINF_SUCCESS)
8447 return rcStrict;
8448 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8449 }
8450 else
8451 {
8452 /* The misaligned GDT/LDT case, map the whole thing. */
8453 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8454 if (rcStrict != VINF_SUCCESS)
8455 return rcStrict;
8456 switch ((uintptr_t)pu32 & 3)
8457 {
8458 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8459 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8460 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8461 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8462 }
8463 }
8464
8465 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8466}
8467
8468/** @} */
8469
8470
8471/*
8472 * Include the C/C++ implementation of instruction.
8473 */
8474#include "IEMAllCImpl.cpp.h"
8475
8476
8477
8478/** @name "Microcode" macros.
8479 *
8480 * The idea is that we should be able to use the same code to interpret
8481 * instructions as well as to feed a recompiler. Thus this obfuscation.
8482 *
8483 * @{
8484 */
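/*
 * A minimal sketch of how an instruction body is built from these macros
 * (the register indexes and the iemAImpl_xor_u16 worker name are assumptions
 * used only for illustration):
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_ARG(uint16_t *, pu16Dst,  0);
 *     IEM_MC_ARG(uint16_t,   u16Src,   1);
 *     IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *     IEM_MC_FETCH_GREG_U16(u16Src, X86_GREG_xCX);
 *     IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
 *     IEM_MC_REF_EFLAGS(pEFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xor_u16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * Because the body is expressed purely in IEM_MC_* terms it can later be
 * given a different expansion, e.g. one that emits recompiled code instead
 * of executing directly.
 */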
8485#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8486#define IEM_MC_END() }
8487#define IEM_MC_PAUSE() do {} while (0)
8488#define IEM_MC_CONTINUE() do {} while (0)
8489
8490/** Internal macro. */
8491#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8492 do \
8493 { \
8494 VBOXSTRICTRC rcStrict2 = a_Expr; \
8495 if (rcStrict2 != VINF_SUCCESS) \
8496 return rcStrict2; \
8497 } while (0)
8498
8499#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8500#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8501#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8502#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8503#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8504#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8505#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8506
8507#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8508#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8509 do { \
8510 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8511 return iemRaiseDeviceNotAvailable(pIemCpu); \
8512 } while (0)
8513#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8514 do { \
8515 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8516 return iemRaiseMathFault(pIemCpu); \
8517 } while (0)
8518#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8519 do { \
8520 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8521 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8522 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8523 return iemRaiseUndefinedOpcode(pIemCpu); \
8524 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8525 return iemRaiseDeviceNotAvailable(pIemCpu); \
8526 } while (0)
8527#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8528 do { \
8529 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8530 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8531 return iemRaiseUndefinedOpcode(pIemCpu); \
8532 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8533 return iemRaiseDeviceNotAvailable(pIemCpu); \
8534 } while (0)
8535#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8536 do { \
8537 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8538 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8539 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8540 return iemRaiseUndefinedOpcode(pIemCpu); \
8541 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8542 return iemRaiseDeviceNotAvailable(pIemCpu); \
8543 } while (0)
8544#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8545 do { \
8546 if (pIemCpu->uCpl != 0) \
8547 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8548 } while (0)
8549
8550
8551#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8552#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8553#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8554#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8555#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8556#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8557#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8558 uint32_t a_Name; \
8559 uint32_t *a_pName = &a_Name
8560#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8561 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8562
8563#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8564#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8565
8566#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8567#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8568#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8569#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8570#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8571#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8572#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8573#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8574#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8575#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8576#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8577#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8578#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8579#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8580#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8581#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8582#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8583#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8584#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8585#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8586#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8587#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8588#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8589#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8590#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8591#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8592#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8593#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8594#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8595/** @note Not for IOPL or IF testing or modification. */
8596#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8597#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8598#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8599#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8600
8601#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8602#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8603#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8604#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8605#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8606#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8607#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8608#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8609#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8610#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8611#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8612 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8613
8614#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8615#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8616/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8617 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8618#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8619#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8620/** @note Not for IOPL or IF testing or modification. */
8621#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8622
8623#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8624#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8625#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8626 do { \
8627 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8628 *pu32Reg += (a_u32Value); \
8629 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8630 } while (0)
8631#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8632
8633#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8634#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8635#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8636 do { \
8637 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8638 *pu32Reg -= (a_u32Value); \
8639 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8640 } while (0)
8641#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8642#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
8643
8644#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8645#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8646#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8647#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8648#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8649#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8650#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8651
8652#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8653#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8654#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8655#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8656
8657#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8658#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8659#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8660
8661#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8662#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
8663#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8664
8665#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8666#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8667#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8668
8669#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8670#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8671#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8672
8673#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8674
8675#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8676
8677#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8678#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8679#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8680 do { \
8681 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8682 *pu32Reg &= (a_u32Value); \
8683 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8684 } while (0)
8685#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8686
8687#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8688#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8689#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8690 do { \
8691 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8692 *pu32Reg |= (a_u32Value); \
8693 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8694 } while (0)
8695#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8696
8697
8698/** @note Not for IOPL or IF modification. */
8699#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8700/** @note Not for IOPL or IF modification. */
8701#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8702/** @note Not for IOPL or IF modification. */
8703#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8704
8705#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8706
8707
8708#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8709 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8710#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8711 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8712#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8713 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8714#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8715 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8716#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8717 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8718#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8719 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8720#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8721 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8722
8723#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8724 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8725#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8726 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8727#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8728 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8729#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8730 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8731#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8732 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8733 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8734 } while (0)
8735#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8736 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8737 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8738 } while (0)
8739#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8740 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8741#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8742 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8743#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8744 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8745
8746#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8748#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8749 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8750#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8751 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8752
8753#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8755#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8757#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8758 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8759
8760#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8761 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8762#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8764#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8766
8767#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8769
8770#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8772#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8773 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8774#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8775 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8776#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8777 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8778
8779#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8781#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8783#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8784 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8785
8786#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8787 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8788#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8789 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8790
8791
8792
8793#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8794 do { \
8795 uint8_t u8Tmp; \
8796 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8797 (a_u16Dst) = u8Tmp; \
8798 } while (0)
8799#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8800 do { \
8801 uint8_t u8Tmp; \
8802 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8803 (a_u32Dst) = u8Tmp; \
8804 } while (0)
8805#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8806 do { \
8807 uint8_t u8Tmp; \
8808 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8809 (a_u64Dst) = u8Tmp; \
8810 } while (0)
8811#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8812 do { \
8813 uint16_t u16Tmp; \
8814 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8815 (a_u32Dst) = u16Tmp; \
8816 } while (0)
8817#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8818 do { \
8819 uint16_t u16Tmp; \
8820 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8821 (a_u64Dst) = u16Tmp; \
8822 } while (0)
8823#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8824 do { \
8825 uint32_t u32Tmp; \
8826 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8827 (a_u64Dst) = u32Tmp; \
8828 } while (0)
8829
8830#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8831 do { \
8832 uint8_t u8Tmp; \
8833 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8834 (a_u16Dst) = (int8_t)u8Tmp; \
8835 } while (0)
8836#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8837 do { \
8838 uint8_t u8Tmp; \
8839 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8840 (a_u32Dst) = (int8_t)u8Tmp; \
8841 } while (0)
8842#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8843 do { \
8844 uint8_t u8Tmp; \
8845 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8846 (a_u64Dst) = (int8_t)u8Tmp; \
8847 } while (0)
8848#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8849 do { \
8850 uint16_t u16Tmp; \
8851 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8852 (a_u32Dst) = (int16_t)u16Tmp; \
8853 } while (0)
8854#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8855 do { \
8856 uint16_t u16Tmp; \
8857 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8858 (a_u64Dst) = (int16_t)u16Tmp; \
8859 } while (0)
8860#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8861 do { \
8862 uint32_t u32Tmp; \
8863 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8864 (a_u64Dst) = (int32_t)u32Tmp; \
8865 } while (0)
8866
8867#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8868 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8869#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8870 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8871#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8872 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8873#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8874 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8875
8876#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8877 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8878#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8879 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8880#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8881 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8882#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8883 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8884
8885#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8886#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8887#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8888#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8889#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8890#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8891#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8892 do { \
8893 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8894 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8895 } while (0)
8896
8897#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8898 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8899#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8900 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8901
8902
8903#define IEM_MC_PUSH_U16(a_u16Value) \
8904 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8905#define IEM_MC_PUSH_U32(a_u32Value) \
8906 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8907#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8908 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8909#define IEM_MC_PUSH_U64(a_u64Value) \
8910 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8911
8912#define IEM_MC_POP_U16(a_pu16Value) \
8913 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8914#define IEM_MC_POP_U32(a_pu32Value) \
8915 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8916#define IEM_MC_POP_U64(a_pu64Value) \
8917 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8918
8919/** Maps guest memory for direct or bounce buffered access.
8920 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8921 * @remarks May return.
8922 */
8923#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8924 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8925
8926/** Maps guest memory for direct or bounce buffered access.
8927 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8928 * @remarks May return.
8929 */
8930#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8931 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8932
8933/** Commits the memory and unmaps the guest memory.
8934 * @remarks May return.
8935 */
8936#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8937 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8938
8939/** Commits the memory and unmaps the guest memory, unless the FPU status word
8940 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
8941 * that would cause FLD not to store its result.
8942 *
8943 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8944 * store, while \#P will not.
8945 *
8946 * @remarks May in theory return - for now.
8947 */
8948#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8949 do { \
8950 if ( !(a_u16FSW & X86_FSW_ES) \
8951 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8952 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8953 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8954 } while (0)
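/* A worked example of the condition above (illustrative): if a_u16FSW has
   X86_FSW_ES and X86_FSW_IE set after an invalid operand, the store is still
   committed when the FCW masks IE (X86_FCW_IM set), because the unmasked
   subset of UE/OE/IE is then empty; with IE unmasked the condition fails and
   the result is not written, matching the \#O, \#U, \#IA, \#IS note above. */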
8955
8956/** Calculates the effective address from R/M. */
8957#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8958 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8959
8960#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8961#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8962#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8963#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8964#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8965#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8966#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8967
8968/**
8969 * Defers the rest of the instruction emulation to a C implementation routine
8970 * and returns, only taking the standard parameters.
8971 *
8972 * @param a_pfnCImpl The pointer to the C routine.
8973 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8974 */
8975#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8976
8977/**
8978 * Defers the rest of instruction emulation to a C implementation routine and
8979 * returns, taking one argument in addition to the standard ones.
8980 *
8981 * @param a_pfnCImpl The pointer to the C routine.
8982 * @param a0 The argument.
8983 */
8984#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8985
8986/**
8987 * Defers the rest of the instruction emulation to a C implementation routine
8988 * and returns, taking two arguments in addition to the standard ones.
8989 *
8990 * @param a_pfnCImpl The pointer to the C routine.
8991 * @param a0 The first extra argument.
8992 * @param a1 The second extra argument.
8993 */
8994#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8995
8996/**
8997 * Defers the rest of the instruction emulation to a C implementation routine
8998 * and returns, taking three arguments in addition to the standard ones.
8999 *
9000 * @param a_pfnCImpl The pointer to the C routine.
9001 * @param a0 The first extra argument.
9002 * @param a1 The second extra argument.
9003 * @param a2 The third extra argument.
9004 */
9005#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9006
9007/**
9008 * Defers the rest of the instruction emulation to a C implementation routine
9009 * and returns, taking four arguments in addition to the standard ones.
9010 *
9011 * @param a_pfnCImpl The pointer to the C routine.
9012 * @param a0 The first extra argument.
9013 * @param a1 The second extra argument.
9014 * @param a2 The third extra argument.
9015 * @param a3 The fourth extra argument.
9016 */
9017#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
9018
9019/**
9020 * Defers the rest of the instruction emulation to a C implementation routine
9021 * and returns, taking five arguments in addition to the standard ones.
9022 *
9023 * @param a_pfnCImpl The pointer to the C routine.
9024 * @param a0 The first extra argument.
9025 * @param a1 The second extra argument.
9026 * @param a2 The third extra argument.
9027 * @param a3 The fourth extra argument.
9028 * @param a4 The fifth extra argument.
9029 */
9030#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
9031
9032/**
9033 * Defers the entire instruction emulation to a C implementation routine and
9034 * returns, only taking the standard parameters.
9035 *
9036 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9037 *
9038 * @param a_pfnCImpl The pointer to the C routine.
9039 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
9040 */
9041#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
9042
9043/**
9044 * Defers the entire instruction emulation to a C implementation routine and
9045 * returns, taking one argument in addition to the standard ones.
9046 *
9047 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9048 *
9049 * @param a_pfnCImpl The pointer to the C routine.
9050 * @param a0 The argument.
9051 */
9052#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
9053
9054/**
9055 * Defers the entire instruction emulation to a C implementation routine and
9056 * returns, taking two arguments in addition to the standard ones.
9057 *
9058 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9059 *
9060 * @param a_pfnCImpl The pointer to the C routine.
9061 * @param a0 The first extra argument.
9062 * @param a1 The second extra argument.
9063 */
9064#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
9065
9066/**
9067 * Defers the entire instruction emulation to a C implementation routine and
9068 * returns, taking three arguments in addition to the standard ones.
9069 *
9070 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9071 *
9072 * @param a_pfnCImpl The pointer to the C routine.
9073 * @param a0 The first extra argument.
9074 * @param a1 The second extra argument.
9075 * @param a2 The third extra argument.
9076 */
9077#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9078
9079/**
9080 * Calls a FPU assembly implementation taking one visible argument.
9081 *
9082 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9083 * @param a0 The first extra argument.
9084 */
9085#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
9086 do { \
9087 iemFpuPrepareUsage(pIemCpu); \
9088 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9089 } while (0)
9090
9091/**
9092 * Calls a FPU assembly implementation taking two visible arguments.
9093 *
9094 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9095 * @param a0 The first extra argument.
9096 * @param a1 The second extra argument.
9097 */
9098#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9099 do { \
9100 iemFpuPrepareUsage(pIemCpu); \
9101 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9102 } while (0)
9103
9104/**
9105 * Calls a FPU assembly implementation taking three visible arguments.
9106 *
9107 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9108 * @param a0 The first extra argument.
9109 * @param a1 The second extra argument.
9110 * @param a2 The third extra argument.
9111 */
9112#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9113 do { \
9114 iemFpuPrepareUsage(pIemCpu); \
9115 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9116 } while (0)
9117
9118#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9119 do { \
9120 (a_FpuData).FSW = (a_FSW); \
9121 (a_FpuData).r80Result = *(a_pr80Value); \
9122 } while (0)
9123
9124/** Pushes FPU result onto the stack. */
9125#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9126 iemFpuPushResult(pIemCpu, &a_FpuData)
9127/** Pushes FPU result onto the stack and sets the FPUDP. */
9128#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9129 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9130
9131/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
9132#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9133 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9134
9135/** Stores FPU result in a stack register. */
9136#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9137 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9138/** Stores FPU result in a stack register and pops the stack. */
9139#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9140 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9141/** Stores FPU result in a stack register and sets the FPUDP. */
9142#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9143 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9144/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9145 * stack. */
9146#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9147 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9148
9149/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9150#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9151 iemFpuUpdateOpcodeAndIp(pIemCpu)
9152/** Free a stack register (for FFREE and FFREEP). */
9153#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9154 iemFpuStackFree(pIemCpu, a_iStReg)
9155/** Increment the FPU stack pointer. */
9156#define IEM_MC_FPU_STACK_INC_TOP() \
9157 iemFpuStackIncTop(pIemCpu)
9158/** Decrement the FPU stack pointer. */
9159#define IEM_MC_FPU_STACK_DEC_TOP() \
9160 iemFpuStackDecTop(pIemCpu)
9161
9162/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9163#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9164 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9165/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9166#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9167 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9168/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9169#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9170 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9171/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9172#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9173 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9174/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9175 * stack. */
9176#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9177 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9178/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9179#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9180 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9181
9182/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9183#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9184 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9185/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9186 * stack. */
9187#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9188 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9189/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9190 * FPUDS. */
9191#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9192 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9193/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9194 * FPUDS. Pops stack. */
9195#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9196 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9197/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9198 * stack twice. */
9199#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9200 iemFpuStackUnderflowThenPopPop(pIemCpu)
9201/** Raises a FPU stack underflow exception for an instruction pushing a result
9202 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9203#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9204 iemFpuStackPushUnderflow(pIemCpu)
9205/** Raises a FPU stack underflow exception for an instruction pushing a result
9206 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9207#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9208 iemFpuStackPushUnderflowTwo(pIemCpu)
9209
9210/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9211 * FPUIP, FPUCS and FOP. */
9212#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9213 iemFpuStackPushOverflow(pIemCpu)
9214/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9215 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9216#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9217 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9218/** Indicates that we (might) have modified the FPU state. */
9219#define IEM_MC_USED_FPU() \
9220 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
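/*
 * Usage sketch (illustrative only, not part of the build): a store-and-pop
 * style instruction such as FSTP m64real would typically combine the macros
 * above roughly like this, with the variable names (u16Fsw, iEffSeg,
 * GCPtrEff, pr80Value) being assumptions made for the example:
 *
 *      IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
 *          ... convert and store the value to memory ...
 *          IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, iEffSeg, GCPtrEff);
 *      IEM_MC_ELSE()
 *          IEM_MC_IF_FCW_IM()
 *              ... store the indefinite value to memory ...
 *              IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, iEffSeg, GCPtrEff);
 *          IEM_MC_ELSE()
 *              IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, iEffSeg, GCPtrEff);
 *          IEM_MC_ENDIF();
 *      IEM_MC_ENDIF();
 *      IEM_MC_USED_FPU();
 *
 * The real instruction bodies live in IEMAllInstructions.cpp.h; this is only
 * meant to show how the store, FSW update and underflow macros fit together.
 */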
9221
9222/**
9223 * Calls a MMX assembly implementation taking two visible arguments.
9224 *
9225 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9226 * @param a0 The first extra argument.
9227 * @param a1 The second extra argument.
9228 */
9229#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9230 do { \
9231 iemFpuPrepareUsage(pIemCpu); \
9232 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9233 } while (0)
9234
9235/**
9236 * Calls a MMX assembly implementation taking three visible arguments.
9237 *
9238 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9239 * @param a0 The first extra argument.
9240 * @param a1 The second extra argument.
9241 * @param a2 The third extra argument.
9242 */
9243#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9244 do { \
9245 iemFpuPrepareUsage(pIemCpu); \
9246 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9247 } while (0)
9248
9249
9250/**
9251 * Calls a SSE assembly implementation taking two visible arguments.
9252 *
 9253 * @param   a_pfnAImpl  Pointer to the assembly SSE routine.
9254 * @param a0 The first extra argument.
9255 * @param a1 The second extra argument.
9256 */
9257#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9258 do { \
9259 iemFpuPrepareUsageSse(pIemCpu); \
9260 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9261 } while (0)
9262
9263/**
9264 * Calls a SSE assembly implementation taking three visible arguments.
9265 *
 9266 * @param   a_pfnAImpl  Pointer to the assembly SSE routine.
9267 * @param a0 The first extra argument.
9268 * @param a1 The second extra argument.
9269 * @param a2 The third extra argument.
9270 */
9271#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9272 do { \
9273 iemFpuPrepareUsageSse(pIemCpu); \
9274 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9275 } while (0)
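/*
 * Usage sketch (illustrative; the worker pointer names are assumptions): both
 * wrapper families first call iemFpuPrepareUsage / iemFpuPrepareUsageSse and
 * then hand the guest x87/XMM state (pXState->x87) to the assembly worker as
 * a hidden first argument, so a register-form instruction body reduces to
 *
 *      IEM_MC_CALL_MMX_AIMPL_2(pfnU64Worker,  pu64Dst,  pu64Src);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnU128Worker, pu128Dst, pu128Src);
 *
 * The only difference between the two families is which prepare routine runs
 * first; the argument marshalling is identical.
 */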
9276
9277
9278/** @note Not for IOPL or IF testing. */
9279#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9280/** @note Not for IOPL or IF testing. */
9281#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9282/** @note Not for IOPL or IF testing. */
9283#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9284/** @note Not for IOPL or IF testing. */
9285#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9286/** @note Not for IOPL or IF testing. */
9287#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9288 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9289 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9290/** @note Not for IOPL or IF testing. */
9291#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9292 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9293 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9294/** @note Not for IOPL or IF testing. */
9295#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9296 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9297 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9298 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9299/** @note Not for IOPL or IF testing. */
9300#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9301 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9302 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9303 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9304#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9305#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9306#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9307/** @note Not for IOPL or IF testing. */
9308#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9309 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9310 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9311/** @note Not for IOPL or IF testing. */
9312#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9313 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9314 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9315/** @note Not for IOPL or IF testing. */
9316#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9317 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9318 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9319/** @note Not for IOPL or IF testing. */
9320#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9321 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9322 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9323/** @note Not for IOPL or IF testing. */
9324#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9325 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9326 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9327/** @note Not for IOPL or IF testing. */
9328#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9329 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9330 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9331#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9332#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9333#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9334 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9335#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9336 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9337#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9338 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9339#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9340 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9341#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9342 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9343#define IEM_MC_IF_FCW_IM() \
9344 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9345
9346#define IEM_MC_ELSE() } else {
9347#define IEM_MC_ENDIF() } do {} while (0)
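/*
 * Expansion sketch (illustrative): the IEM_MC_IF_* / IEM_MC_ELSE / IEM_MC_ENDIF
 * trio is plain brace syntax in disguise, so a body written as (assuming
 * X86_EFL_ZF as the flag mask)
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          ... taken path ...
 *      IEM_MC_ELSE()
 *          ... not-taken path ...
 *      IEM_MC_ENDIF();
 *
 * expands to an ordinary if/else testing pCtx->eflags.u directly, which is
 * also why these must not be used for IOPL or IF checks (see the notes above).
 */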
9348
9349/** @} */
9350
9351
9352/** @name Opcode Debug Helpers.
9353 * @{
9354 */
9355#ifdef DEBUG
9356# define IEMOP_MNEMONIC(a_szMnemonic) \
9357 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9358 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9359# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9360 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9361 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9362#else
9363# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9364# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9365#endif
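/*
 * Example (values invented for illustration): IEMOP_MNEMONIC2("add", "Eb,Gb")
 * in a DEBUG build emits a Log4 line along the lines of
 *
 *      decode - 0010:00000000c0123456 add Eb,Gb [#42]
 *
 * with "lock " inserted after the address when IEM_OP_PRF_LOCK is active.
 * Release builds compile both macros away to empty statements.
 */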
9366
9367/** @} */
9368
9369
9370/** @name Opcode Helpers.
9371 * @{
9372 */
9373
9374#ifdef IN_RING3
9375# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9376 do { \
9377 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9378 else \
9379 { \
9380 DBGFSTOP(IEMCPU_TO_VM(pIemCpu)); \
9381 return IEMOP_RAISE_INVALID_OPCODE(); \
9382 } \
9383 } while (0)
9384#else
9385# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9386 do { \
9387 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9388 else return IEMOP_RAISE_INVALID_OPCODE(); \
9389 } while (0)
9390#endif
9391
9392/** The instruction requires a 186 or later. */
9393#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
9394# define IEMOP_HLP_MIN_186() do { } while (0)
9395#else
9396# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
9397#endif
9398
9399/** The instruction requires a 286 or later. */
9400#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
9401# define IEMOP_HLP_MIN_286() do { } while (0)
9402#else
9403# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
9404#endif
9405
9406/** The instruction requires a 386 or later. */
9407#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9408# define IEMOP_HLP_MIN_386() do { } while (0)
9409#else
9410# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
9411#endif
9412
9413/** The instruction requires a 386 or later if the given expression is true. */
9414#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9415# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
9416#else
9417# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
9418#endif
9419
9420/** The instruction requires a 486 or later. */
9421#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
9422# define IEMOP_HLP_MIN_486() do { } while (0)
9423#else
9424# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
9425#endif
9426
9427/** The instruction requires a Pentium (586) or later. */
9428#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
9429# define IEMOP_HLP_MIN_586() do { } while (0)
9430#else
9431# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
9432#endif
9433
9434/** The instruction requires a PentiumPro (686) or later. */
9435#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
9436# define IEMOP_HLP_MIN_686() do { } while (0)
9437#else
9438# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
9439#endif
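/*
 * Usage sketch (the opcode routine is hypothetical): the gating macros go at
 * the very top of a decoder function, before any operand bytes are fetched,
 * e.g. for an instruction introduced with the Pentium:
 *
 *      IEMOP_MNEMONIC("rdtsc");
 *      IEMOP_HLP_MIN_586();
 *      ... decode operands and dispatch ...
 *
 * On a target CPU below the required level this folds into
 * IEMOP_RAISE_INVALID_OPCODE() (with a DBGFSTOP in ring-3 builds), and when
 * IEM_CFG_TARGET_CPU is fixed high enough at compile time the check vanishes.
 */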
9440
9441
9442/** The instruction raises an \#UD in real and V8086 mode. */
9443#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9444 do \
9445 { \
9446 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9447 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9448 } while (0)
9449
9450/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9451 * lock prefixed.
9452 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9453#define IEMOP_HLP_NO_LOCK_PREFIX() \
9454 do \
9455 { \
9456 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9457 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9458 } while (0)
9459
9460/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9461 * 64-bit mode. */
9462#define IEMOP_HLP_NO_64BIT() \
9463 do \
9464 { \
9465 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9466 return IEMOP_RAISE_INVALID_OPCODE(); \
9467 } while (0)
9468
9469/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9470 * 64-bit mode. */
9471#define IEMOP_HLP_ONLY_64BIT() \
9472 do \
9473 { \
9474 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9475 return IEMOP_RAISE_INVALID_OPCODE(); \
9476 } while (0)
9477
9478/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9479#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9480 do \
9481 { \
9482 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9483 iemRecalEffOpSize64Default(pIemCpu); \
9484 } while (0)
9485
9486/** The instruction has 64-bit operand size if 64-bit mode. */
9487#define IEMOP_HLP_64BIT_OP_SIZE() \
9488 do \
9489 { \
9490 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9491 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9492 } while (0)
9493
9494/** Only a REX prefix immediately preceding the first opcode byte takes
9495 * effect. This macro helps ensuring this as well as logging bad guest code. */
9496#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9497 do \
9498 { \
9499 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9500 { \
9501 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9502 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9503 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9504 pIemCpu->uRexB = 0; \
9505 pIemCpu->uRexIndex = 0; \
9506 pIemCpu->uRexReg = 0; \
9507 iemRecalEffOpSize(pIemCpu); \
9508 } \
9509 } while (0)
9510
9511/**
9512 * Done decoding.
9513 */
9514#define IEMOP_HLP_DONE_DECODING() \
9515 do \
9516 { \
9517 /*nothing for now, maybe later... */ \
9518 } while (0)
9519
9520/**
9521 * Done decoding, raise \#UD exception if lock prefix present.
9522 */
9523#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9524 do \
9525 { \
9526 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9527 { /* likely */ } \
9528 else \
9529 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9530 } while (0)
9531#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9532 do \
9533 { \
9534 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9535 { /* likely */ } \
9536 else \
9537 { \
9538 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9539 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9540 } \
9541 } while (0)
9542#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9543 do \
9544 { \
9545 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9546 { /* likely */ } \
9547 else \
9548 { \
9549 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9550 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9551 } \
9552 } while (0)
9553/**
9554 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9555 * are present.
9556 */
9557#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9558 do \
9559 { \
9560 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9561 { /* likely */ } \
9562 else \
9563 return IEMOP_RAISE_INVALID_OPCODE(); \
9564 } while (0)
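/*
 * Ordering sketch (illustrative): these "done decoding" checks are meant to
 * run after all prefixes, the ModR/M byte and any immediates have been
 * fetched, so that bad prefix combinations fault the way real CPUs do:
 *
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      ... fetch displacement / immediate ...
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      ... perform the operation ...
 *
 * IEM_OPCODE_GET_NEXT_U8 is the regular opcode fetch helper used throughout
 * the decoder; the surrounding steps are placeholders.
 */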
9565
9566
9567/**
9568 * Calculates the effective address of a ModR/M memory operand.
9569 *
9570 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9571 *
9572 * @return Strict VBox status code.
9573 * @param pIemCpu The IEM per CPU data.
9574 * @param bRm The ModRM byte.
9575 * @param cbImm The size of any immediate following the
9576 * effective address opcode bytes. Important for
9577 * RIP relative addressing.
9578 * @param pGCPtrEff Where to return the effective address.
9579 */
9580IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9581{
9582 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9583 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9584#define SET_SS_DEF() \
9585 do \
9586 { \
9587 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9588 pIemCpu->iEffSeg = X86_SREG_SS; \
9589 } while (0)
9590
9591 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9592 {
9593/** @todo Check the effective address size crap! */
9594 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9595 {
9596 uint16_t u16EffAddr;
9597
9598 /* Handle the disp16 form with no registers first. */
9599 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9600 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9601 else
9602 {
 9603                /* Get the displacement. */
9604 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9605 {
9606 case 0: u16EffAddr = 0; break;
9607 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9608 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9609 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9610 }
9611
9612 /* Add the base and index registers to the disp. */
9613 switch (bRm & X86_MODRM_RM_MASK)
9614 {
9615 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9616 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9617 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9618 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9619 case 4: u16EffAddr += pCtx->si; break;
9620 case 5: u16EffAddr += pCtx->di; break;
9621 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9622 case 7: u16EffAddr += pCtx->bx; break;
9623 }
9624 }
9625
9626 *pGCPtrEff = u16EffAddr;
9627 }
9628 else
9629 {
9630 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9631 uint32_t u32EffAddr;
9632
9633 /* Handle the disp32 form with no registers first. */
9634 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9635 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9636 else
9637 {
9638 /* Get the register (or SIB) value. */
9639 switch ((bRm & X86_MODRM_RM_MASK))
9640 {
9641 case 0: u32EffAddr = pCtx->eax; break;
9642 case 1: u32EffAddr = pCtx->ecx; break;
9643 case 2: u32EffAddr = pCtx->edx; break;
9644 case 3: u32EffAddr = pCtx->ebx; break;
9645 case 4: /* SIB */
9646 {
9647 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9648
9649 /* Get the index and scale it. */
9650 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9651 {
9652 case 0: u32EffAddr = pCtx->eax; break;
9653 case 1: u32EffAddr = pCtx->ecx; break;
9654 case 2: u32EffAddr = pCtx->edx; break;
9655 case 3: u32EffAddr = pCtx->ebx; break;
9656 case 4: u32EffAddr = 0; /*none */ break;
9657 case 5: u32EffAddr = pCtx->ebp; break;
9658 case 6: u32EffAddr = pCtx->esi; break;
9659 case 7: u32EffAddr = pCtx->edi; break;
9660 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9661 }
9662 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9663
9664 /* add base */
9665 switch (bSib & X86_SIB_BASE_MASK)
9666 {
9667 case 0: u32EffAddr += pCtx->eax; break;
9668 case 1: u32EffAddr += pCtx->ecx; break;
9669 case 2: u32EffAddr += pCtx->edx; break;
9670 case 3: u32EffAddr += pCtx->ebx; break;
9671 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9672 case 5:
9673 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9674 {
9675 u32EffAddr += pCtx->ebp;
9676 SET_SS_DEF();
9677 }
9678 else
9679 {
9680 uint32_t u32Disp;
9681 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9682 u32EffAddr += u32Disp;
9683 }
9684 break;
9685 case 6: u32EffAddr += pCtx->esi; break;
9686 case 7: u32EffAddr += pCtx->edi; break;
9687 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9688 }
9689 break;
9690 }
9691 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9692 case 6: u32EffAddr = pCtx->esi; break;
9693 case 7: u32EffAddr = pCtx->edi; break;
9694 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9695 }
9696
9697 /* Get and add the displacement. */
9698 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9699 {
9700 case 0:
9701 break;
9702 case 1:
9703 {
9704 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9705 u32EffAddr += i8Disp;
9706 break;
9707 }
9708 case 2:
9709 {
9710 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9711 u32EffAddr += u32Disp;
9712 break;
9713 }
9714 default:
9715 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9716 }
9717
9718 }
9719 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9720 *pGCPtrEff = u32EffAddr;
9721 else
9722 {
9723 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9724 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9725 }
9726 }
9727 }
9728 else
9729 {
9730 uint64_t u64EffAddr;
9731
9732 /* Handle the rip+disp32 form with no registers first. */
9733 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9734 {
9735 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9736 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9737 }
9738 else
9739 {
9740 /* Get the register (or SIB) value. */
9741 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9742 {
9743 case 0: u64EffAddr = pCtx->rax; break;
9744 case 1: u64EffAddr = pCtx->rcx; break;
9745 case 2: u64EffAddr = pCtx->rdx; break;
9746 case 3: u64EffAddr = pCtx->rbx; break;
9747 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9748 case 6: u64EffAddr = pCtx->rsi; break;
9749 case 7: u64EffAddr = pCtx->rdi; break;
9750 case 8: u64EffAddr = pCtx->r8; break;
9751 case 9: u64EffAddr = pCtx->r9; break;
9752 case 10: u64EffAddr = pCtx->r10; break;
9753 case 11: u64EffAddr = pCtx->r11; break;
9754 case 13: u64EffAddr = pCtx->r13; break;
9755 case 14: u64EffAddr = pCtx->r14; break;
9756 case 15: u64EffAddr = pCtx->r15; break;
9757 /* SIB */
9758 case 4:
9759 case 12:
9760 {
9761 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9762
9763 /* Get the index and scale it. */
9764 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9765 {
9766 case 0: u64EffAddr = pCtx->rax; break;
9767 case 1: u64EffAddr = pCtx->rcx; break;
9768 case 2: u64EffAddr = pCtx->rdx; break;
9769 case 3: u64EffAddr = pCtx->rbx; break;
9770 case 4: u64EffAddr = 0; /*none */ break;
9771 case 5: u64EffAddr = pCtx->rbp; break;
9772 case 6: u64EffAddr = pCtx->rsi; break;
9773 case 7: u64EffAddr = pCtx->rdi; break;
9774 case 8: u64EffAddr = pCtx->r8; break;
9775 case 9: u64EffAddr = pCtx->r9; break;
9776 case 10: u64EffAddr = pCtx->r10; break;
9777 case 11: u64EffAddr = pCtx->r11; break;
9778 case 12: u64EffAddr = pCtx->r12; break;
9779 case 13: u64EffAddr = pCtx->r13; break;
9780 case 14: u64EffAddr = pCtx->r14; break;
9781 case 15: u64EffAddr = pCtx->r15; break;
9782 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9783 }
9784 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9785
9786 /* add base */
9787 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9788 {
9789 case 0: u64EffAddr += pCtx->rax; break;
9790 case 1: u64EffAddr += pCtx->rcx; break;
9791 case 2: u64EffAddr += pCtx->rdx; break;
9792 case 3: u64EffAddr += pCtx->rbx; break;
9793 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9794 case 6: u64EffAddr += pCtx->rsi; break;
9795 case 7: u64EffAddr += pCtx->rdi; break;
9796 case 8: u64EffAddr += pCtx->r8; break;
9797 case 9: u64EffAddr += pCtx->r9; break;
9798 case 10: u64EffAddr += pCtx->r10; break;
9799 case 11: u64EffAddr += pCtx->r11; break;
9800 case 12: u64EffAddr += pCtx->r12; break;
9801 case 14: u64EffAddr += pCtx->r14; break;
9802 case 15: u64EffAddr += pCtx->r15; break;
9803 /* complicated encodings */
9804 case 5:
9805 case 13:
9806 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9807 {
9808 if (!pIemCpu->uRexB)
9809 {
9810 u64EffAddr += pCtx->rbp;
9811 SET_SS_DEF();
9812 }
9813 else
9814 u64EffAddr += pCtx->r13;
9815 }
9816 else
9817 {
9818 uint32_t u32Disp;
9819 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9820 u64EffAddr += (int32_t)u32Disp;
9821 }
9822 break;
9823 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9824 }
9825 break;
9826 }
9827 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9828 }
9829
9830 /* Get and add the displacement. */
9831 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9832 {
9833 case 0:
9834 break;
9835 case 1:
9836 {
9837 int8_t i8Disp;
9838 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9839 u64EffAddr += i8Disp;
9840 break;
9841 }
9842 case 2:
9843 {
9844 uint32_t u32Disp;
9845 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9846 u64EffAddr += (int32_t)u32Disp;
9847 break;
9848 }
9849 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9850 }
9851
9852 }
9853
9854 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9855 *pGCPtrEff = u64EffAddr;
9856 else
9857 {
9858 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9859 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9860 }
9861 }
9862
9863 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9864 return VINF_SUCCESS;
9865}
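/*
 * Worked examples (illustrative): with 16-bit addressing, bRm=0x42 decodes as
 * mod=1, rm=2, i.e. [bp+si+disp8]; the disp8 is sign-extended to 16 bits,
 * added to BP+SI with 16-bit wrap, and SS becomes the default segment via
 * SET_SS_DEF(). In 64-bit mode, bRm=0x05 (mod=0, rm=5) is RIP-relative: the
 * disp32 is sign-extended and added to the RIP of the *next* instruction,
 * which is why cbImm (the size of any trailing immediate) matters above.
 */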
9866
9867/** @} */
9868
9869
9870
9871/*
9872 * Include the instructions
9873 */
9874#include "IEMAllInstructions.cpp.h"
9875
9876
9877
9878
9879#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9880
9881/**
9882 * Sets up execution verification mode.
9883 */
9884IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9885{
9886 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9887 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9888
9889 /*
9890 * Always note down the address of the current instruction.
9891 */
9892 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9893 pIemCpu->uOldRip = pOrgCtx->rip;
9894
9895 /*
9896 * Enable verification and/or logging.
9897 */
 9898    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9899 if ( fNewNoRem
9900 && ( 0
9901#if 0 /* auto enable on first paged protected mode interrupt */
9902 || ( pOrgCtx->eflags.Bits.u1IF
9903 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9904 && TRPMHasTrap(pVCpu)
9905 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9906#endif
9907#if 0
 9908         || (   pOrgCtx->cs.Sel == 0x10
 9909             && (   pOrgCtx->rip == 0x90119e3e
 9910                 || pOrgCtx->rip == 0x901d9810))
9911#endif
9912#if 0 /* Auto enable DSL - FPU stuff. */
 9913         || (   pOrgCtx->cs.Sel == 0x10
9914 && (// pOrgCtx->rip == 0xc02ec07f
9915 //|| pOrgCtx->rip == 0xc02ec082
9916 //|| pOrgCtx->rip == 0xc02ec0c9
9917 0
9918 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9919#endif
9920#if 0 /* Auto enable DSL - fstp st0 stuff. */
 9921         || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9922#endif
9923#if 0
9924 || pOrgCtx->rip == 0x9022bb3a
9925#endif
9926#if 0
9927 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9928#endif
9929#if 0
9930 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9931 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9932#endif
9933#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9934 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9935 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9936 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9937#endif
9938#if 0 /* NT4SP1 - xadd early boot. */
9939 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9940#endif
9941#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9942 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9943#endif
9944#if 0 /* NT4SP1 - cmpxchg (AMD). */
9945 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9946#endif
9947#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9948 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9949#endif
9950#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9951 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9952
9953#endif
9954#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9955 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9956
9957#endif
9958#if 0 /* NT4SP1 - frstor [ecx] */
9959 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9960#endif
9961#if 0 /* xxxxxx - All long mode code. */
9962 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9963#endif
9964#if 0 /* rep movsq linux 3.7 64-bit boot. */
9965 || (pOrgCtx->rip == 0x0000000000100241)
9966#endif
9967#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9968 || (pOrgCtx->rip == 0x000000000215e240)
9969#endif
9970#if 0 /* DOS's size-overridden iret to v8086. */
9971 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9972#endif
9973 )
9974 )
9975 {
9976 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9977 RTLogFlags(NULL, "enabled");
9978 fNewNoRem = false;
9979 }
9980 if (fNewNoRem != pIemCpu->fNoRem)
9981 {
9982 pIemCpu->fNoRem = fNewNoRem;
9983 if (!fNewNoRem)
9984 {
9985 LogAlways(("Enabling verification mode!\n"));
9986 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9987 }
9988 else
9989 LogAlways(("Disabling verification mode!\n"));
9990 }
9991
9992 /*
9993 * Switch state.
9994 */
9995 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9996 {
9997 static CPUMCTX s_DebugCtx; /* Ugly! */
9998
9999 s_DebugCtx = *pOrgCtx;
10000 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
10001 }
10002
10003 /*
10004 * See if there is an interrupt pending in TRPM and inject it if we can.
10005 */
10006 pIemCpu->uInjectCpl = UINT8_MAX;
10007 if ( pOrgCtx->eflags.Bits.u1IF
10008 && TRPMHasTrap(pVCpu)
10009 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
10010 {
10011 uint8_t u8TrapNo;
10012 TRPMEVENT enmType;
10013 RTGCUINT uErrCode;
10014 RTGCPTR uCr2;
10015 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
10016 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10017 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10018 TRPMResetTrap(pVCpu);
10019 pIemCpu->uInjectCpl = pIemCpu->uCpl;
10020 }
10021
10022 /*
10023 * Reset the counters.
10024 */
10025 pIemCpu->cIOReads = 0;
10026 pIemCpu->cIOWrites = 0;
10027 pIemCpu->fIgnoreRaxRdx = false;
10028 pIemCpu->fOverlappingMovs = false;
10029 pIemCpu->fProblematicMemory = false;
10030 pIemCpu->fUndefinedEFlags = 0;
10031
10032 if (IEM_VERIFICATION_ENABLED(pIemCpu))
10033 {
10034 /*
10035 * Free all verification records.
10036 */
10037 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
10038 pIemCpu->pIemEvtRecHead = NULL;
10039 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
10040 do
10041 {
10042 while (pEvtRec)
10043 {
10044 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
10045 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
10046 pIemCpu->pFreeEvtRec = pEvtRec;
10047 pEvtRec = pNext;
10048 }
10049 pEvtRec = pIemCpu->pOtherEvtRecHead;
10050 pIemCpu->pOtherEvtRecHead = NULL;
10051 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
10052 } while (pEvtRec);
10053 }
10054}
10055
10056
10057/**
10058 * Allocate an event record.
10059 * @returns Pointer to a record.
10060 */
10061IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
10062{
10063 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10064 return NULL;
10065
10066 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
10067 if (pEvtRec)
10068 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
10069 else
10070 {
10071 if (!pIemCpu->ppIemEvtRecNext)
10072 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
10073
10074 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
10075 if (!pEvtRec)
10076 return NULL;
10077 }
10078 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
10079 pEvtRec->pNext = NULL;
10080 return pEvtRec;
10081}
10082
10083
10084/**
10085 * IOMMMIORead notification.
10086 */
10087VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
10088{
10089 PVMCPU pVCpu = VMMGetCpu(pVM);
10090 if (!pVCpu)
10091 return;
10092 PIEMCPU pIemCpu = &pVCpu->iem.s;
10093 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10094 if (!pEvtRec)
10095 return;
10096 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
10097 pEvtRec->u.RamRead.GCPhys = GCPhys;
10098 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
10099 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10100 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10101}
10102
10103
10104/**
10105 * IOMMMIOWrite notification.
10106 */
10107VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
10108{
10109 PVMCPU pVCpu = VMMGetCpu(pVM);
10110 if (!pVCpu)
10111 return;
10112 PIEMCPU pIemCpu = &pVCpu->iem.s;
10113 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10114 if (!pEvtRec)
10115 return;
10116 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
10117 pEvtRec->u.RamWrite.GCPhys = GCPhys;
10118 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
10119 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
10120 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
10121 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
10122 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
10123 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10124 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10125}
10126
10127
10128/**
10129 * IOMIOPortRead notification.
10130 */
10131VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
10132{
10133 PVMCPU pVCpu = VMMGetCpu(pVM);
10134 if (!pVCpu)
10135 return;
10136 PIEMCPU pIemCpu = &pVCpu->iem.s;
10137 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10138 if (!pEvtRec)
10139 return;
10140 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10141 pEvtRec->u.IOPortRead.Port = Port;
10142 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10143 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10144 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10145}
10146
10147/**
10148 * IOMIOPortWrite notification.
10149 */
10150VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10151{
10152 PVMCPU pVCpu = VMMGetCpu(pVM);
10153 if (!pVCpu)
10154 return;
10155 PIEMCPU pIemCpu = &pVCpu->iem.s;
10156 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10157 if (!pEvtRec)
10158 return;
10159 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10160 pEvtRec->u.IOPortWrite.Port = Port;
10161 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10162 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10163 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10164 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10165}
10166
10167
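/**
 * I/O port string read notification (string variant of IEMNotifyIOPortRead).
 */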
10168VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10169{
10170 PVMCPU pVCpu = VMMGetCpu(pVM);
10171 if (!pVCpu)
10172 return;
10173 PIEMCPU pIemCpu = &pVCpu->iem.s;
10174 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10175 if (!pEvtRec)
10176 return;
10177 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
10178 pEvtRec->u.IOPortStrRead.Port = Port;
10179 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
10180 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
10181 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10182 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10183}
10184
10185
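/**
 * I/O port string write notification (string variant of IEMNotifyIOPortWrite).
 */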
10186VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10187{
10188 PVMCPU pVCpu = VMMGetCpu(pVM);
10189 if (!pVCpu)
10190 return;
10191 PIEMCPU pIemCpu = &pVCpu->iem.s;
10192 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10193 if (!pEvtRec)
10194 return;
10195 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
10196 pEvtRec->u.IOPortStrWrite.Port = Port;
10197 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
10198 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
10199 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10200 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10201}
10202
10203
10204/**
10205 * Fakes and records an I/O port read.
10206 *
10207 * @returns VINF_SUCCESS.
10208 * @param pIemCpu The IEM per CPU data.
10209 * @param Port The I/O port.
10210 * @param pu32Value Where to store the fake value.
10211 * @param cbValue The size of the access.
10212 */
10213IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10214{
10215 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10216 if (pEvtRec)
10217 {
10218 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10219 pEvtRec->u.IOPortRead.Port = Port;
10220 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10221 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10222 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10223 }
10224 pIemCpu->cIOReads++;
10225 *pu32Value = 0xcccccccc;
10226 return VINF_SUCCESS;
10227}
10228
10229
10230/**
10231 * Fakes and records an I/O port write.
10232 *
10233 * @returns VINF_SUCCESS.
10234 * @param pIemCpu The IEM per CPU data.
10235 * @param Port The I/O port.
10236 * @param u32Value The value being written.
10237 * @param cbValue The size of the access.
10238 */
10239IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10240{
10241 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10242 if (pEvtRec)
10243 {
10244 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10245 pEvtRec->u.IOPortWrite.Port = Port;
10246 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10247 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10248 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10249 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10250 }
10251 pIemCpu->cIOWrites++;
10252 return VINF_SUCCESS;
10253}
10254
10255
10256/**
10257 * Used to add extra details about a stub case.
10258 * @param pIemCpu The IEM per CPU state.
10259 */
10260IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10261{
10262 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10263 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10264 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10265 char szRegs[4096];
10266 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10267 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10268 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10269 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10270 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10271 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10272 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10273 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10274 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10275 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10276 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10277 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10278 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10279 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10280 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10281 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10282 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10283 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10284 " efer=%016VR{efer}\n"
10285 " pat=%016VR{pat}\n"
10286 " sf_mask=%016VR{sf_mask}\n"
10287 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10288 " lstar=%016VR{lstar}\n"
10289 " star=%016VR{star} cstar=%016VR{cstar}\n"
10290 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10291 );
10292
10293 char szInstr1[256];
10294 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10295 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10296 szInstr1, sizeof(szInstr1), NULL);
10297 char szInstr2[256];
10298 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10299 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10300 szInstr2, sizeof(szInstr2), NULL);
10301
10302 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10303}
10304
10305
10306/**
10307 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10308 * dump to the assertion info.
10309 *
10310 * @param pEvtRec The record to dump.
10311 */
10312IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10313{
10314 switch (pEvtRec->enmEvent)
10315 {
10316 case IEMVERIFYEVENT_IOPORT_READ:
10317 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
 10318                             pEvtRec->u.IOPortRead.Port,
 10319                             pEvtRec->u.IOPortRead.cbValue);
10320 break;
10321 case IEMVERIFYEVENT_IOPORT_WRITE:
10322 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10323 pEvtRec->u.IOPortWrite.Port,
10324 pEvtRec->u.IOPortWrite.cbValue,
10325 pEvtRec->u.IOPortWrite.u32Value);
10326 break;
10327 case IEMVERIFYEVENT_IOPORT_STR_READ:
10328 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
 10329                             pEvtRec->u.IOPortStrRead.Port,
 10330                             pEvtRec->u.IOPortStrRead.cbValue,
 10331                             pEvtRec->u.IOPortStrRead.cTransfers);
10332 break;
10333 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10334 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
10335 pEvtRec->u.IOPortStrWrite.Port,
10336 pEvtRec->u.IOPortStrWrite.cbValue,
10337 pEvtRec->u.IOPortStrWrite.cTransfers);
10338 break;
10339 case IEMVERIFYEVENT_RAM_READ:
10340 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10341 pEvtRec->u.RamRead.GCPhys,
10342 pEvtRec->u.RamRead.cb);
10343 break;
10344 case IEMVERIFYEVENT_RAM_WRITE:
10345 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10346 pEvtRec->u.RamWrite.GCPhys,
10347 pEvtRec->u.RamWrite.cb,
10348 (int)pEvtRec->u.RamWrite.cb,
10349 pEvtRec->u.RamWrite.ab);
10350 break;
10351 default:
10352 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10353 break;
10354 }
10355}
10356
10357
10358/**
 10359 * Raises an assertion on the specified records, showing the given message with
 10360 * dumps of both records attached.
10361 *
10362 * @param pIemCpu The IEM per CPU data.
10363 * @param pEvtRec1 The first record.
10364 * @param pEvtRec2 The second record.
10365 * @param pszMsg The message explaining why we're asserting.
10366 */
10367IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10368{
10369 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10370 iemVerifyAssertAddRecordDump(pEvtRec1);
10371 iemVerifyAssertAddRecordDump(pEvtRec2);
10372 iemVerifyAssertMsg2(pIemCpu);
10373 RTAssertPanic();
10374}
10375
10376
10377/**
10378 * Raises an assertion on the specified record, showing the given message with
10379 * a record dump attached.
10380 *
10381 * @param pIemCpu The IEM per CPU data.
 10382 * @param   pEvtRec     The record to dump.
10383 * @param pszMsg The message explaining why we're asserting.
10384 */
10385IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10386{
10387 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10388 iemVerifyAssertAddRecordDump(pEvtRec);
10389 iemVerifyAssertMsg2(pIemCpu);
10390 RTAssertPanic();
10391}
10392
10393
10394/**
10395 * Verifies a write record.
10396 *
10397 * @param pIemCpu The IEM per CPU data.
10398 * @param pEvtRec The write record.
 10399 * @param   fRem        Set if REM was doing the other execution; if clear,
 10400 *                      it was HM.
10401 */
10402IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10403{
10404 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10405 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10406 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10407 if ( RT_FAILURE(rc)
10408 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10409 {
10410 /* fend off ins */
10411 if ( !pIemCpu->cIOReads
10412 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10413 || ( pEvtRec->u.RamWrite.cb != 1
10414 && pEvtRec->u.RamWrite.cb != 2
10415 && pEvtRec->u.RamWrite.cb != 4) )
10416 {
10417 /* fend off ROMs and MMIO */
10418 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10419 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10420 {
10421 /* fend off fxsave */
10422 if (pEvtRec->u.RamWrite.cb != 512)
10423 {
10424 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10425 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10426 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10427 RTAssertMsg2Add("%s: %.*Rhxs\n"
10428 "iem: %.*Rhxs\n",
10429 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10430 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10431 iemVerifyAssertAddRecordDump(pEvtRec);
10432 iemVerifyAssertMsg2(pIemCpu);
10433 RTAssertPanic();
10434 }
10435 }
10436 }
10437 }
10438
10439}
10440
10441/**
 10442 * Performs the post-execution verification checks.
10443 */
10444IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10445{
10446 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10447 return;
10448
10449 /*
10450 * Switch back the state.
10451 */
10452 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10453 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10454 Assert(pOrgCtx != pDebugCtx);
10455 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10456
10457 /*
10458 * Execute the instruction in REM.
10459 */
10460 bool fRem = false;
10461 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10462 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10463 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10464#ifdef IEM_VERIFICATION_MODE_FULL_HM
10465 if ( HMIsEnabled(pVM)
10466 && pIemCpu->cIOReads == 0
10467 && pIemCpu->cIOWrites == 0
10468 && !pIemCpu->fProblematicMemory)
10469 {
10470 uint64_t uStartRip = pOrgCtx->rip;
10471 unsigned iLoops = 0;
10472 do
10473 {
10474 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10475 iLoops++;
10476 } while ( rc == VINF_SUCCESS
10477 || ( rc == VINF_EM_DBG_STEPPED
10478 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10479 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10480 || ( pOrgCtx->rip != pDebugCtx->rip
10481 && pIemCpu->uInjectCpl != UINT8_MAX
10482 && iLoops < 8) );
10483 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10484 rc = VINF_SUCCESS;
10485 }
10486#endif
10487 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10488 || rc == VINF_IOM_R3_IOPORT_READ
10489 || rc == VINF_IOM_R3_IOPORT_WRITE
10490 || rc == VINF_IOM_R3_MMIO_READ
10491 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10492 || rc == VINF_IOM_R3_MMIO_WRITE
10493 || rc == VINF_CPUM_R3_MSR_READ
10494 || rc == VINF_CPUM_R3_MSR_WRITE
10495 || rc == VINF_EM_RESCHEDULE
10496 )
10497 {
10498 EMRemLock(pVM);
10499 rc = REMR3EmulateInstruction(pVM, pVCpu);
10500 AssertRC(rc);
10501 EMRemUnlock(pVM);
10502 fRem = true;
10503 }
10504
10505 /*
10506 * Compare the register states.
10507 */
10508 unsigned cDiffs = 0;
10509 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10510 {
10511 //Log(("REM and IEM ends up with different registers!\n"));
10512 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10513
10514# define CHECK_FIELD(a_Field) \
10515 do \
10516 { \
10517 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10518 { \
10519 switch (sizeof(pOrgCtx->a_Field)) \
10520 { \
10521 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10522 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10523 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10524 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10525 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10526 } \
10527 cDiffs++; \
10528 } \
10529 } while (0)
10530# define CHECK_XSTATE_FIELD(a_Field) \
10531 do \
10532 { \
10533 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10534 { \
10535 switch (sizeof(pOrgXState->a_Field)) \
10536 { \
10537 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10538 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10539 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10540 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10541 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10542 } \
10543 cDiffs++; \
10544 } \
10545 } while (0)
10546
10547# define CHECK_BIT_FIELD(a_Field) \
10548 do \
10549 { \
10550 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10551 { \
10552 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10553 cDiffs++; \
10554 } \
10555 } while (0)
10556
10557# define CHECK_SEL(a_Sel) \
10558 do \
10559 { \
10560 CHECK_FIELD(a_Sel.Sel); \
10561 CHECK_FIELD(a_Sel.Attr.u); \
10562 CHECK_FIELD(a_Sel.u64Base); \
10563 CHECK_FIELD(a_Sel.u32Limit); \
10564 CHECK_FIELD(a_Sel.fFlags); \
10565 } while (0)
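    /*
     * Example of the output these checks produce (values invented for
     * illustration): a single differing 64-bit register is reported as
     *
     *         rax differs - iem=0000000000000001 - vmx=0000000000000002
     *
     * and cDiffs is bumped so the summary assertion at the bottom fires once
     * all fields have been compared.
     */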
10566
10567 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10568 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10569
10570#if 1 /* The recompiler doesn't update these the intel way. */
10571 if (fRem)
10572 {
10573 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10574 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10575 pOrgXState->x87.CS = pDebugXState->x87.CS;
10576 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10577 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10578 pOrgXState->x87.DS = pDebugXState->x87.DS;
10579 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10580 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10581 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10582 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10583 }
10584#endif
10585 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10586 {
10587 RTAssertMsg2Weak(" the FPU state differs\n");
10588 cDiffs++;
10589 CHECK_XSTATE_FIELD(x87.FCW);
10590 CHECK_XSTATE_FIELD(x87.FSW);
10591 CHECK_XSTATE_FIELD(x87.FTW);
10592 CHECK_XSTATE_FIELD(x87.FOP);
10593 CHECK_XSTATE_FIELD(x87.FPUIP);
10594 CHECK_XSTATE_FIELD(x87.CS);
10595 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10596 CHECK_XSTATE_FIELD(x87.FPUDP);
10597 CHECK_XSTATE_FIELD(x87.DS);
10598 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10599 CHECK_XSTATE_FIELD(x87.MXCSR);
10600 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10601 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10602 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10603 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10604 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10605 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10606 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10607 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10608 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10609 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10610 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10611 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10612 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10613 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10614 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10615 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10616 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10617 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10618 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10619 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10620 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10621 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10622 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10623 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10624 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10625 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10626 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10627 }
10628 CHECK_FIELD(rip);
10629 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10630 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10631 {
10632 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10633 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10634 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10635 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10636 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10637 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10638 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10639 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10640 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10641 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10642 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10643 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10644 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10645 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10646 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10647 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10648 if (0 && !fRem) /** @todo debug the occational clear RF flags when running against VT-x. */
10649 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10650 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10651 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10652 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10653 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10654 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10655 }
10656
10657 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10658 CHECK_FIELD(rax);
10659 CHECK_FIELD(rcx);
10660 if (!pIemCpu->fIgnoreRaxRdx)
10661 CHECK_FIELD(rdx);
10662 CHECK_FIELD(rbx);
10663 CHECK_FIELD(rsp);
10664 CHECK_FIELD(rbp);
10665 CHECK_FIELD(rsi);
10666 CHECK_FIELD(rdi);
10667 CHECK_FIELD(r8);
10668 CHECK_FIELD(r9);
10669 CHECK_FIELD(r10);
10670 CHECK_FIELD(r11);
10671 CHECK_FIELD(r12);
10672 CHECK_FIELD(r13);
10673 CHECK_SEL(cs);
10674 CHECK_SEL(ss);
10675 CHECK_SEL(ds);
10676 CHECK_SEL(es);
10677 CHECK_SEL(fs);
10678 CHECK_SEL(gs);
10679 CHECK_FIELD(cr0);
10680
 10681        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
 10682           the faulting instruction first: 001b:77f61ff3 66 8b 42 02   mov ax, word [edx+002h] (NT4SP1) */
 10683        /* Kludge #2: CR2 differs slightly on cross-page boundary faults; we report the last address of the access
 10684           while REM reports the address of the first byte on the page.  Pending investigation as to which is correct. */
10685 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10686 {
10687 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10688 { /* ignore */ }
10689 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10690 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10691 && fRem)
10692 { /* ignore */ }
10693 else
10694 CHECK_FIELD(cr2);
10695 }
10696 CHECK_FIELD(cr3);
10697 CHECK_FIELD(cr4);
10698 CHECK_FIELD(dr[0]);
10699 CHECK_FIELD(dr[1]);
10700 CHECK_FIELD(dr[2]);
10701 CHECK_FIELD(dr[3]);
10702 CHECK_FIELD(dr[6]);
10703 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10704 CHECK_FIELD(dr[7]);
10705 CHECK_FIELD(gdtr.cbGdt);
10706 CHECK_FIELD(gdtr.pGdt);
10707 CHECK_FIELD(idtr.cbIdt);
10708 CHECK_FIELD(idtr.pIdt);
10709 CHECK_SEL(ldtr);
10710 CHECK_SEL(tr);
10711 CHECK_FIELD(SysEnter.cs);
10712 CHECK_FIELD(SysEnter.eip);
10713 CHECK_FIELD(SysEnter.esp);
10714 CHECK_FIELD(msrEFER);
10715 CHECK_FIELD(msrSTAR);
10716 CHECK_FIELD(msrPAT);
10717 CHECK_FIELD(msrLSTAR);
10718 CHECK_FIELD(msrCSTAR);
10719 CHECK_FIELD(msrSFMASK);
10720 CHECK_FIELD(msrKERNELGSBASE);
10721
10722 if (cDiffs != 0)
10723 {
10724 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10725 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10726 iemVerifyAssertMsg2(pIemCpu);
10727 RTAssertPanic();
10728 }
10729# undef CHECK_FIELD
10730# undef CHECK_BIT_FIELD
10731 }
10732
10733 /*
10734 * If the register state compared fine, check the verification event
10735 * records.
10736 */
10737 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10738 {
10739 /*
 10740         * Compare verification event records.
10741 * - I/O port accesses should be a 1:1 match.
10742 */
10743 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10744 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10745 while (pIemRec && pOtherRec)
10746 {
 10747            /* Since we might miss RAM writes and reads, ignore reads and verify
 10748               any extra IEM write records against guest memory before skipping them. */
10749 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10750 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10751 && pIemRec->pNext)
10752 {
10753 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10754 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10755 pIemRec = pIemRec->pNext;
10756 }
10757
10758 /* Do the compare. */
10759 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10760 {
10761 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10762 break;
10763 }
10764 bool fEquals;
10765 switch (pIemRec->enmEvent)
10766 {
10767 case IEMVERIFYEVENT_IOPORT_READ:
10768 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10769 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10770 break;
10771 case IEMVERIFYEVENT_IOPORT_WRITE:
10772 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10773 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10774 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10775 break;
10776 case IEMVERIFYEVENT_IOPORT_STR_READ:
10777 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
10778 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
10779 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
10780 break;
10781 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10782 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
10783 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
10784 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
10785 break;
10786 case IEMVERIFYEVENT_RAM_READ:
10787 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10788 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10789 break;
10790 case IEMVERIFYEVENT_RAM_WRITE:
10791 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10792 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10793 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10794 break;
10795 default:
10796 fEquals = false;
10797 break;
10798 }
10799 if (!fEquals)
10800 {
10801 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10802 break;
10803 }
10804
10805 /* advance */
10806 pIemRec = pIemRec->pNext;
10807 pOtherRec = pOtherRec->pNext;
10808 }
10809
10810 /* Ignore extra writes and reads. */
10811 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10812 {
10813 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10814 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10815 pIemRec = pIemRec->pNext;
10816 }
10817 if (pIemRec != NULL)
10818 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10819 else if (pOtherRec != NULL)
10820 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10821 }
10822 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10823}
10824
10825#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10826
10827/* stubs */
10828IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10829{
10830 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10831 return VERR_INTERNAL_ERROR;
10832}
10833
10834IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10835{
10836 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10837 return VERR_INTERNAL_ERROR;
10838}
10839
10840#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10841
10842
10843#ifdef LOG_ENABLED
10844/**
10845 * Logs the current instruction.
10846 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10847 * @param pCtx The current CPU context.
10848 * @param fSameCtx Set if we have the same context information as the VMM,
10849 * clear if we may have already executed an instruction in
10850 * our debug context. When clear, we assume IEMCPU holds
10851 * valid CPU mode info.
10852 */
10853IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10854{
10855# ifdef IN_RING3
10856 if (LogIs2Enabled())
10857 {
10858 char szInstr[256];
10859 uint32_t cbInstr = 0;
10860 if (fSameCtx)
10861 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10862 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10863 szInstr, sizeof(szInstr), &cbInstr);
10864 else
10865 {
10866 uint32_t fFlags = 0;
10867 switch (pVCpu->iem.s.enmCpuMode)
10868 {
10869 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10870 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10871 case IEMMODE_16BIT:
10872 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10873 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10874 else
10875 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10876 break;
10877 }
10878 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10879 szInstr, sizeof(szInstr), &cbInstr);
10880 }
10881
10882 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10883 Log2(("****\n"
10884 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10885 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10886 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10887 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10888 " %s\n"
10889 ,
10890 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10891 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10892 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10893 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10894 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10895 szInstr));
10896
10897 if (LogIs3Enabled())
10898 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10899 }
10900 else
10901# endif
10902 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10903 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10904}
10905#endif
10906
10907
10908/**
10909 * Makes status code adjustments (pass up from I/O and access handlers)
10910 * as well as maintaining statistics.
10911 *
10912 * @returns Strict VBox status code to pass up.
10913 * @param pIemCpu The IEM per CPU data.
10914 * @param rcStrict The status from executing an instruction.
10915 */
10916DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10917{
10918 if (rcStrict != VINF_SUCCESS)
10919 {
10920 if (RT_SUCCESS(rcStrict))
10921 {
10922 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10923 || rcStrict == VINF_IOM_R3_IOPORT_READ
10924 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10925 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
10926 || rcStrict == VINF_IOM_R3_MMIO_READ
10927 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10928 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10929 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
10930 || rcStrict == VINF_CPUM_R3_MSR_READ
10931 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10932 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10933 || rcStrict == VINF_EM_RAW_TO_R3
10934 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10935 /* raw-mode / virt handlers only: */
10936 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10937 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10938 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10939 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10940 || rcStrict == VINF_SELM_SYNC_GDT
10941 || rcStrict == VINF_CSAM_PENDING_ACTION
10942 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10943 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10944/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10945 int32_t const rcPassUp = pIemCpu->rcPassUp;
10946 if (rcPassUp == VINF_SUCCESS)
10947 pIemCpu->cRetInfStatuses++;
10948 else if ( rcPassUp < VINF_EM_FIRST
10949 || rcPassUp > VINF_EM_LAST
10950 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10951 {
10952 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10953 pIemCpu->cRetPassUpStatus++;
10954 rcStrict = rcPassUp;
10955 }
10956 else
10957 {
10958 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10959 pIemCpu->cRetInfStatuses++;
10960 }
10961 }
10962 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10963 pIemCpu->cRetAspectNotImplemented++;
10964 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10965 pIemCpu->cRetInstrNotImplemented++;
10966#ifdef IEM_VERIFICATION_MODE_FULL
10967 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10968 rcStrict = VINF_SUCCESS;
10969#endif
10970 else
10971 pIemCpu->cRetErrStatuses++;
10972 }
10973 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10974 {
10975 pIemCpu->cRetPassUpStatus++;
10976 rcStrict = pIemCpu->rcPassUp;
10977 }
10978
10979 return rcStrict;
10980}
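
/*
 * A minimal sketch (hypothetical values) of the pass-up rule above: a status
 * recorded in rcPassUp during the instruction takes precedence over a plain
 * VINF_SUCCESS, so a deferred ring-3 request is not lost.  Only the helper
 * itself is from this file; the surrounding statements are illustrative.
 */
#if 0
    pIemCpu->rcPassUp = VINF_EM_RAW_TO_R3;    /* pretend something requested a ring-3 detour */
    VBOXSTRICTRC rcExample = iemExecStatusCodeFiddling(pIemCpu, VINF_SUCCESS);
    Assert(rcExample == VINF_EM_RAW_TO_R3);   /* the final else-if branch hands the pass-up status back */
#endif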
10981
10982
10983/**
10984 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10985 * IEMExecOneWithPrefetchedByPC.
10986 *
10987 * @return Strict VBox status code.
10988 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10989 * @param pIemCpu The IEM per CPU data.
10990 * @param fExecuteInhibit If set, execute the instruction following CLI,
10991 * POP SS and MOV SS,GR.
10992 */
10993DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10994{
10995 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10996 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10997 if (rcStrict == VINF_SUCCESS)
10998 pIemCpu->cInstructions++;
10999 if (pIemCpu->cActiveMappings > 0)
11000 iemMemRollback(pIemCpu);
11001//#ifdef DEBUG
11002// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
11003//#endif
11004
11005 /* Execute the next instruction as well if a cli, pop ss or
11006 mov ss, Gr has just completed successfully. */
11007 if ( fExecuteInhibit
11008 && rcStrict == VINF_SUCCESS
11009 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
11010 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
11011 {
11012 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
11013 if (rcStrict == VINF_SUCCESS)
11014 {
11015# ifdef LOG_ENABLED
11016 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
11017# endif
11018 IEM_OPCODE_GET_NEXT_U8(&b);
11019 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
11020 if (rcStrict == VINF_SUCCESS)
11021 pIemCpu->cInstructions++;
11022 if (pIemCpu->cActiveMappings > 0)
11023 iemMemRollback(pIemCpu);
11024 }
11025 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
11026 }
11027
11028 /*
11029 * Return value fiddling, statistics and sanity assertions.
11030 */
11031 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11032
11033 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
11034 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
11035#if defined(IEM_VERIFICATION_MODE_FULL)
11036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
11037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
11038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
11039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
11040#endif
11041 return rcStrict;
11042}
11043
11044
11045#ifdef IN_RC
11046/**
11047 * Re-enters raw-mode or ensures we return to ring-3.
11048 *
11049 * @returns rcStrict, maybe modified.
11050 * @param pIemCpu The IEM CPU structure.
11051 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11052 * @param pCtx The current CPU context.
11053 * @param rcStrict The status code returned by the interpreter.
11054 */
11055DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
11056{
11057 if (!pIemCpu->fInPatchCode)
11058 CPUMRawEnter(pVCpu);
11059 return rcStrict;
11060}
11061#endif
11062
11063
11064/**
11065 * Execute one instruction.
11066 *
11067 * @return Strict VBox status code.
11068 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11069 */
11070VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
11071{
11072 PIEMCPU pIemCpu = &pVCpu->iem.s;
11073
11074#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11075 iemExecVerificationModeSetup(pIemCpu);
11076#endif
11077#ifdef LOG_ENABLED
11078 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11079 iemLogCurInstr(pVCpu, pCtx, true);
11080#endif
11081
11082 /*
11083 * Do the decoding and emulation.
11084 */
11085 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11086 if (rcStrict == VINF_SUCCESS)
11087 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11088
11089#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11090 /*
11091 * Assert some sanity.
11092 */
11093 iemExecVerificationModeCheck(pIemCpu);
11094#endif
11095#ifdef IN_RC
11096 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11097#endif
11098 if (rcStrict != VINF_SUCCESS)
11099 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11100 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11101 return rcStrict;
11102}
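
/*
 * A minimal usage sketch, assuming a hypothetical ring-3 caller running on
 * the EMT; only IEMExecOne itself is taken from this file.
 */
#if 0
    for (unsigned cInstr = 0; cInstr < 16; cInstr++)
    {
        VBOXSTRICTRC rcStrict2 = IEMExecOne(pVCpu);
        if (rcStrict2 != VINF_SUCCESS)
            break;  /* hand pending I/O, MMIO or scheduling requests back to EM */
    }
#endif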
11103
11104
11105VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11106{
11107 PIEMCPU pIemCpu = &pVCpu->iem.s;
11108 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11109 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11110
11111 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11112 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11113 if (rcStrict == VINF_SUCCESS)
11114 {
11115 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11116 if (pcbWritten)
11117 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11118 }
11119
11120#ifdef IN_RC
11121 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11122#endif
11123 return rcStrict;
11124}
11125
11126
11127VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11128 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11129{
11130 PIEMCPU pIemCpu = &pVCpu->iem.s;
11131 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11132 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11133
11134 VBOXSTRICTRC rcStrict;
11135 if ( cbOpcodeBytes
11136 && pCtx->rip == OpcodeBytesPC)
11137 {
11138 iemInitDecoder(pIemCpu, false);
11139 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11140 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11141 rcStrict = VINF_SUCCESS;
11142 }
11143 else
11144 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11145 if (rcStrict == VINF_SUCCESS)
11146 {
11147 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11148 }
11149
11150#ifdef IN_RC
11151 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11152#endif
11153 return rcStrict;
11154}
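
/*
 * Sketch of a hypothetical caller that has already fetched the opcode bytes
 * (e.g. from an exit record) and hands them to IEM so the guest memory read
 * can be skipped while RIP still matches; abInstr is an assumed local.
 */
#if 0
    uint8_t const abInstr[] = { 0xec };  /* in al, dx */
    VBOXSTRICTRC rcStrict2 = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
                                                          abInstr, sizeof(abInstr));
#endif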
11155
11156
11157VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11158{
11159 PIEMCPU pIemCpu = &pVCpu->iem.s;
11160 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11161 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11162
11163 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11164 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11165 if (rcStrict == VINF_SUCCESS)
11166 {
11167 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11168 if (pcbWritten)
11169 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11170 }
11171
11172#ifdef IN_RC
11173 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11174#endif
11175 return rcStrict;
11176}
11177
11178
11179VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11180 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11181{
11182 PIEMCPU pIemCpu = &pVCpu->iem.s;
11183 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11184 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11185
11186 VBOXSTRICTRC rcStrict;
11187 if ( cbOpcodeBytes
11188 && pCtx->rip == OpcodeBytesPC)
11189 {
11190 iemInitDecoder(pIemCpu, true);
11191 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11192 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11193 rcStrict = VINF_SUCCESS;
11194 }
11195 else
11196 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11197 if (rcStrict == VINF_SUCCESS)
11198 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11199
11200#ifdef IN_RC
11201 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11202#endif
11203 return rcStrict;
11204}
11205
11206
11207VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11208{
11209 PIEMCPU pIemCpu = &pVCpu->iem.s;
11210
11211 /*
11212 * See if there is an interrupt pending in TRPM and inject it if we can.
11213 */
11214#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11215 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11216# ifdef IEM_VERIFICATION_MODE_FULL
11217 pIemCpu->uInjectCpl = UINT8_MAX;
11218# endif
11219 if ( pCtx->eflags.Bits.u1IF
11220 && TRPMHasTrap(pVCpu)
11221 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11222 {
11223 uint8_t u8TrapNo;
11224 TRPMEVENT enmType;
11225 RTGCUINT uErrCode;
11226 RTGCPTR uCr2;
11227 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11228 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11229 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11230 TRPMResetTrap(pVCpu);
11231 }
11232#else
11233 iemExecVerificationModeSetup(pIemCpu);
11234 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11235#endif
11236
11237 /*
11238 * Log the state.
11239 */
11240#ifdef LOG_ENABLED
11241 iemLogCurInstr(pVCpu, pCtx, true);
11242#endif
11243
11244 /*
11245 * Do the decoding and emulation.
11246 */
11247 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11248 if (rcStrict == VINF_SUCCESS)
11249 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11250
11251#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11252 /*
11253 * Assert some sanity.
11254 */
11255 iemExecVerificationModeCheck(pIemCpu);
11256#endif
11257
11258 /*
11259 * Maybe re-enter raw-mode and log.
11260 */
11261#ifdef IN_RC
11262 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11263#endif
11264 if (rcStrict != VINF_SUCCESS)
11265 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11266 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11267 return rcStrict;
11268}
11269
11270
11271
11272/**
11273 * Injects a trap, fault, abort, software interrupt or external interrupt.
11274 *
11275 * The parameter list matches TRPMQueryTrapAll pretty closely.
11276 *
11277 * @returns Strict VBox status code.
11278 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11279 * @param u8TrapNo The trap number.
11280 * @param enmType What type is it (trap/fault/abort), software
11281 * interrupt or hardware interrupt.
11282 * @param uErrCode The error code if applicable.
11283 * @param uCr2 The CR2 value if applicable.
11284 * @param cbInstr The instruction length (only relevant for
11285 * software interrupts).
11286 */
11287VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11288 uint8_t cbInstr)
11289{
11290 iemInitDecoder(&pVCpu->iem.s, false);
11291#ifdef DBGFTRACE_ENABLED
11292 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11293 u8TrapNo, enmType, uErrCode, uCr2);
11294#endif
11295
11296 uint32_t fFlags;
11297 switch (enmType)
11298 {
11299 case TRPM_HARDWARE_INT:
11300 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11301 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11302 uErrCode = uCr2 = 0;
11303 break;
11304
11305 case TRPM_SOFTWARE_INT:
11306 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11307 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11308 uErrCode = uCr2 = 0;
11309 break;
11310
11311 case TRPM_TRAP:
11312 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11313 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11314 if (u8TrapNo == X86_XCPT_PF)
11315 fFlags |= IEM_XCPT_FLAGS_CR2;
11316 switch (u8TrapNo)
11317 {
11318 case X86_XCPT_DF:
11319 case X86_XCPT_TS:
11320 case X86_XCPT_NP:
11321 case X86_XCPT_SS:
11322 case X86_XCPT_PF:
11323 case X86_XCPT_AC:
11324 fFlags |= IEM_XCPT_FLAGS_ERR;
11325 break;
11326
11327 case X86_XCPT_NMI:
11328 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11329 break;
11330 }
11331 break;
11332
11333 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11334 }
11335
11336 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11337}
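
/*
 * Sketch (hypothetical vector) of forwarding an external interrupt; the error
 * code and CR2 arguments are ignored for TRPM_HARDWARE_INT as per the switch
 * above, and cbInstr only matters for software interrupts.
 */
#if 0
    VBOXSTRICTRC rcStrict2 = IEMInjectTrap(pVCpu, 0x20 /*u8TrapNo*/, TRPM_HARDWARE_INT,
                                           0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
#endif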
11338
11339
11340/**
11341 * Injects the active TRPM event.
11342 *
11343 * @returns Strict VBox status code.
11344 * @param pVCpu The cross context virtual CPU structure.
11345 */
11346VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11347{
11348#ifndef IEM_IMPLEMENTS_TASKSWITCH
11349 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11350#else
11351 uint8_t u8TrapNo;
11352 TRPMEVENT enmType;
11353 RTGCUINT uErrCode;
11354 RTGCUINTPTR uCr2;
11355 uint8_t cbInstr;
11356 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11357 if (RT_FAILURE(rc))
11358 return rc;
11359
11360 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11361
11362 /** @todo Are there any other codes that imply the event was successfully
11363 * delivered to the guest? See @bugref{6607}. */
11364 if ( rcStrict == VINF_SUCCESS
11365 || rcStrict == VINF_IEM_RAISED_XCPT)
11366 {
11367 TRPMResetTrap(pVCpu);
11368 }
11369 return rcStrict;
11370#endif
11371}
11372
11373
11374VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11375{
11376 return VERR_NOT_IMPLEMENTED;
11377}
11378
11379
11380VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11381{
11382 return VERR_NOT_IMPLEMENTED;
11383}
11384
11385
11386#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11387/**
11388 * Executes an IRET instruction with default operand size.
11389 *
11390 * This is for PATM.
11391 *
11392 * @returns VBox status code.
11393 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11394 * @param pCtxCore The register frame.
11395 */
11396VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11397{
11398 PIEMCPU pIemCpu = &pVCpu->iem.s;
11399 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11400
11401 iemCtxCoreToCtx(pCtx, pCtxCore);
11402 iemInitDecoder(pIemCpu);
11403 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11404 if (rcStrict == VINF_SUCCESS)
11405 iemCtxToCtxCore(pCtxCore, pCtx);
11406 else
11407 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11408 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11409 return rcStrict;
11410}
11411#endif
11412
11413
11414/**
11415 * Macro used by the IEMExec* method to check the given instruction length.
11416 *
11417 * Will return on failure!
11418 *
11419 * @param a_cbInstr The given instruction length.
11420 * @param a_cbMin The minimum length.
11421 */
11422#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11423 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11424 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
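/* The unsigned subtraction folds both bounds into a single compare: with
   a_cbMin = 2, a_cbInstr = 1 wraps around to a huge value and fails,
   a_cbInstr = 2 gives 0 <= 13, and a_cbInstr = 16 gives 14 > 13 and fails,
   so only lengths in the range a_cbMin..15 pass. */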
11425
11426
11427/**
11428 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11429 *
11430 * This API ASSUMES that the caller has already verified that the guest code is
11431 * allowed to access the I/O port. (The I/O port is in the DX register in the
11432 * guest state.)
11433 *
11434 * @returns Strict VBox status code.
11435 * @param pVCpu The cross context virtual CPU structure.
11436 * @param cbValue The size of the I/O port access (1, 2, or 4).
11437 * @param enmAddrMode The addressing mode.
11438 * @param fRepPrefix Indicates whether a repeat prefix is used
11439 * (doesn't matter which for this instruction).
11440 * @param cbInstr The instruction length in bytes.
11441 * @param iEffSeg The effective segment register index.
11442 */
11443VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11444 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11445{
11446 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11447 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11448
11449 /*
11450 * State init.
11451 */
11452 PIEMCPU pIemCpu = &pVCpu->iem.s;
11453 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11454
11455 /*
11456 * Switch orgy for getting to the right handler.
11457 */
11458 VBOXSTRICTRC rcStrict;
11459 if (fRepPrefix)
11460 {
11461 switch (enmAddrMode)
11462 {
11463 case IEMMODE_16BIT:
11464 switch (cbValue)
11465 {
11466 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11467 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11468 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11469 default:
11470 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11471 }
11472 break;
11473
11474 case IEMMODE_32BIT:
11475 switch (cbValue)
11476 {
11477 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11478 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11479 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11480 default:
11481 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11482 }
11483 break;
11484
11485 case IEMMODE_64BIT:
11486 switch (cbValue)
11487 {
11488 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11489 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11490 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11491 default:
11492 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11493 }
11494 break;
11495
11496 default:
11497 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11498 }
11499 }
11500 else
11501 {
11502 switch (enmAddrMode)
11503 {
11504 case IEMMODE_16BIT:
11505 switch (cbValue)
11506 {
11507 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11508 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11509 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11510 default:
11511 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11512 }
11513 break;
11514
11515 case IEMMODE_32BIT:
11516 switch (cbValue)
11517 {
11518 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11519 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11520 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11521 default:
11522 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11523 }
11524 break;
11525
11526 case IEMMODE_64BIT:
11527 switch (cbValue)
11528 {
11529 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11530 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11531 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11532 default:
11533 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11534 }
11535 break;
11536
11537 default:
11538 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11539 }
11540 }
11541
11542 iemUninitExec(pIemCpu);
11543 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11544}
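
/*
 * A minimal usage sketch, assuming a hypothetical exit handler that has
 * already checked the I/O permission bitmap; only IEMExecStringIoWrite and
 * its parameter meanings are taken from this file.
 */
#if 0
    VBOXSTRICTRC rcStrict2 = IEMExecStringIoWrite(pVCpu,
                                                  1 /*cbValue: byte-sized OUTSB*/,
                                                  IEMMODE_32BIT /*enmAddrMode*/,
                                                  true /*fRepPrefix*/,
                                                  2 /*cbInstr: REP + OUTSB*/,
                                                  X86_SREG_DS /*iEffSeg*/);
#endif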
11545
11546
11547/**
11548 * Interface for HM and EM for executing string I/O IN (read) instructions.
11549 *
11550 * This API ASSUMES that the caller has already verified that the guest code is
11551 * allowed to access the I/O port. (The I/O port is in the DX register in the
11552 * guest state.)
11553 *
11554 * @returns Strict VBox status code.
11555 * @param pVCpu The cross context virtual CPU structure.
11556 * @param cbValue The size of the I/O port access (1, 2, or 4).
11557 * @param enmAddrMode The addressing mode.
11558 * @param fRepPrefix Indicates whether a repeat prefix is used
11559 * (doesn't matter which for this instruction).
11560 * @param cbInstr The instruction length in bytes.
11561 */
11562VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11563 bool fRepPrefix, uint8_t cbInstr)
11564{
11565 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11566
11567 /*
11568 * State init.
11569 */
11570 PIEMCPU pIemCpu = &pVCpu->iem.s;
11571 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11572
11573 /*
11574 * Switch orgy for getting to the right handler.
11575 */
11576 VBOXSTRICTRC rcStrict;
11577 if (fRepPrefix)
11578 {
11579 switch (enmAddrMode)
11580 {
11581 case IEMMODE_16BIT:
11582 switch (cbValue)
11583 {
11584 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11585 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11586 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11587 default:
11588 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11589 }
11590 break;
11591
11592 case IEMMODE_32BIT:
11593 switch (cbValue)
11594 {
11595 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11596 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11597 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11598 default:
11599 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11600 }
11601 break;
11602
11603 case IEMMODE_64BIT:
11604 switch (cbValue)
11605 {
11606 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11607 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11608 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11609 default:
11610 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11611 }
11612 break;
11613
11614 default:
11615 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11616 }
11617 }
11618 else
11619 {
11620 switch (enmAddrMode)
11621 {
11622 case IEMMODE_16BIT:
11623 switch (cbValue)
11624 {
11625 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11626 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11627 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11628 default:
11629 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11630 }
11631 break;
11632
11633 case IEMMODE_32BIT:
11634 switch (cbValue)
11635 {
11636 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11637 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11638 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11639 default:
11640 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11641 }
11642 break;
11643
11644 case IEMMODE_64BIT:
11645 switch (cbValue)
11646 {
11647 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11648 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11649 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11650 default:
11651 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11652 }
11653 break;
11654
11655 default:
11656 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11657 }
11658 }
11659
11660 iemUninitExec(pIemCpu);
11661 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11662}
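
/*
 * Sketch for the read direction (hypothetical caller): INS always stores to
 * ES:xDI, which is why no effective segment parameter exists here.
 */
#if 0
    VBOXSTRICTRC rcStrict2 = IEMExecStringIoRead(pVCpu, 2 /*cbValue: word-sized INSW*/,
                                                 IEMMODE_16BIT, false /*fRepPrefix*/, 1 /*cbInstr*/);
#endif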
11663
11664
11665
11666/**
11667 * Interface for HM and EM to write to a CRx register.
11668 *
11669 * @returns Strict VBox status code.
11670 * @param pVCpu The cross context virtual CPU structure.
11671 * @param cbInstr The instruction length in bytes.
11672 * @param iCrReg The control register number (destination).
11673 * @param iGReg The general purpose register number (source).
11674 *
11675 * @remarks In ring-0 not all of the state needs to be synced in.
11676 */
11677VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11678{
11679 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11680 Assert(iCrReg < 16);
11681 Assert(iGReg < 16);
11682
11683 PIEMCPU pIemCpu = &pVCpu->iem.s;
11684 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11685 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11686 iemUninitExec(pIemCpu);
11687 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11688}
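
/*
 * Sketch (hypothetical decode): an exit on 'mov cr3, rax' maps directly onto
 * this helper; 3 bytes covers the 0F 22 /r encoding without prefixes.
 */
#if 0
    VBOXSTRICTRC rcStrict2 = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/,
                                                       3 /*iCrReg: CR3*/, X86_GREG_xAX /*iGReg*/);
#endif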
11689
11690
11691/**
11692 * Interface for HM and EM to read from a CRx register.
11693 *
11694 * @returns Strict VBox status code.
11695 * @param pVCpu The cross context virtual CPU structure.
11696 * @param cbInstr The instruction length in bytes.
11697 * @param iGReg The general purpose register number (destination).
11698 * @param iCrReg The control register number (source).
11699 *
11700 * @remarks In ring-0 not all of the state needs to be synced in.
11701 */
11702VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11703{
11704 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11705 Assert(iCrReg < 16);
11706 Assert(iGReg < 16);
11707
11708 PIEMCPU pIemCpu = &pVCpu->iem.s;
11709 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11710 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11711 iemUninitExec(pIemCpu);
11712 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11713}
11714
11715
11716/**
11717 * Interface for HM and EM to clear the CR0[TS] bit.
11718 *
11719 * @returns Strict VBox status code.
11720 * @param pVCpu The cross context virtual CPU structure.
11721 * @param cbInstr The instruction length in bytes.
11722 *
11723 * @remarks In ring-0 not all of the state needs to be synced in.
11724 */
11725VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11726{
11727 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11728
11729 PIEMCPU pIemCpu = &pVCpu->iem.s;
11730 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11731 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11732 iemUninitExec(pIemCpu);
11733 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11734}
11735
11736
11737/**
11738 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11739 *
11740 * @returns Strict VBox status code.
11741 * @param pVCpu The cross context virtual CPU structure.
11742 * @param cbInstr The instruction length in bytes.
11743 * @param uValue The value to load into CR0.
11744 *
11745 * @remarks In ring-0 not all of the state needs to be synced in.
11746 */
11747VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11748{
11749 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11750
11751 PIEMCPU pIemCpu = &pVCpu->iem.s;
11752 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11753 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11754 iemUninitExec(pIemCpu);
11755 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11756}
11757
11758
11759/**
11760 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11761 *
11762 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11763 *
11764 * @returns Strict VBox status code.
11765 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11766 * @param cbInstr The instruction length in bytes.
11767 * @remarks In ring-0 not all of the state needs to be synced in.
11768 * @thread EMT(pVCpu)
11769 */
11770VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11771{
11772 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11773
11774 PIEMCPU pIemCpu = &pVCpu->iem.s;
11775 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11776 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11777 iemUninitExec(pIemCpu);
11778 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11779}
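
/*
 * Sketch (hypothetical guest values): the helper reads ecx and edx:eax from
 * the guest context, so a caller only loads those before invoking it; pCtx is
 * an assumed local and 7 selects x87+SSE+AVX state in XCR0.
 */
#if 0
    pCtx->rcx = 0;                      /* XCR0 */
    pCtx->rax = 7;                      /* x87 | SSE | YMM */
    pCtx->rdx = 0;
    VBOXSTRICTRC rcStrict2 = IEMExecDecodedXsetbv(pVCpu, 3 /*cbInstr: 0f 01 d1*/);
#endif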
11780
11781#ifdef IN_RING3
11782
11783/**
11784 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11785 *
11786 * @returns Merge between @a rcStrict and what the commit operation returned.
11787 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11788 * @param rcStrict The status code returned by ring-0 or raw-mode.
11789 */
11790VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11791{
11792 PIEMCPU pIemCpu = &pVCpu->iem.s;
11793
11794 /*
11795 * Retrieve and reset the pending commit.
11796 */
11797 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11798 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11799 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11800
11801 /*
11802 * Must reset pass-up status code.
11803 */
11804 pIemCpu->rcPassUp = VINF_SUCCESS;
11805
11806 /*
11807 * Call the function. Currently using switch here instead of function
11808 * pointer table as a switch won't get skewed.
11809 */
11810 VBOXSTRICTRC rcStrictCommit;
11811 switch (enmFn)
11812 {
11813 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11814 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11815 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11816 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11817 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11818 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11819 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11820 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11821 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11822 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11823 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11824 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11825 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11826 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11827 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11828 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11829 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11830 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11831 default:
11832 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11833 }
11834
11835 /*
11836 * Merge status code (if any) with the incoming one.
11837 */
11838 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11839 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11840 return rcStrict;
11841 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11842 return rcStrictCommit;
11843
11844 /* Complicated. */
11845 if (RT_FAILURE(rcStrict))
11846 return rcStrict;
11847 if (RT_FAILURE(rcStrictCommit))
11848 return rcStrictCommit;
11849 if ( rcStrict >= VINF_EM_FIRST
11850 && rcStrict <= VINF_EM_LAST)
11851 {
11852 if ( rcStrictCommit >= VINF_EM_FIRST
11853 && rcStrictCommit <= VINF_EM_LAST)
11854 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11855
11856 /* This really shouldn't happen. Check PGM + handler code! */
11857 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11858 }
11859 /* This shouldn't really happen either, see IOM_SUCCESS. */
11860 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11861}
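
/*
 * Merge examples (hypothetical statuses): a clean commit keeps the incoming
 * status, so rcStrict = VINF_EM_RESCHEDULE with rcStrictCommit = VINF_SUCCESS
 * yields VINF_EM_RESCHEDULE; conversely rcStrict = VINF_EM_RAW_TO_R3 with a
 * commit returning VINF_IOM_R3_IOPORT_READ yields the commit status, and when
 * both are VINF_EM_* statuses the lower (stricter) one wins.
 */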
11862
11863#endif /* IN_RING3 */
11864