VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 61142

Last change on this file since 61142 was 61068, checked in by vboxsync, 9 years ago

CPUM,IEM: FPU/SSE/AVX state and host resources APIs, first installment. This should fix the win 8.1 issue.

1/* $Id: IEMAll.cpp 61068 2016-05-20 01:24:53Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
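/* Example (illustrative): a level 4 statement for the "decoding mnemonics
   w/ EIP" purpose listed above.  The mnemonic and message text are made up;
   only the Log4 macro and the format types are real:

        Log4(("decode - %04x:%08RX64  nop\n", pCtx->cs.Sel, pCtx->rip));
 */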
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85
86/*********************************************************************************************************************************
87* Header Files *
88*********************************************************************************************************************************/
89#define LOG_GROUP LOG_GROUP_IEM
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/pdm.h>
93#include <VBox/vmm/pgm.h>
94#include <internal/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/tm.h>
99#include <VBox/vmm/dbgf.h>
100#include <VBox/vmm/dbgftrace.h>
101#ifdef VBOX_WITH_RAW_MODE_NOT_R0
102# include <VBox/vmm/patm.h>
103# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
104# include <VBox/vmm/csam.h>
105# endif
106#endif
107#include "IEMInternal.h"
108#ifdef IEM_VERIFICATION_MODE_FULL
109# include <VBox/vmm/rem.h>
110# include <VBox/vmm/mm.h>
111#endif
112#include <VBox/vmm/vm.h>
113#include <VBox/log.h>
114#include <VBox/err.h>
115#include <VBox/param.h>
116#include <VBox/dis.h>
117#include <VBox/disopcode.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123
124/*********************************************************************************************************************************
125* Structures and Typedefs *
126*********************************************************************************************************************************/
127/** @typedef PFNIEMOP
128 * Pointer to an opcode decoder function.
129 */
130
131/** @def FNIEMOP_DEF
132 * Define an opcode decoder function.
133 *
134 * We're using macros for this so that adding and removing parameters as well as
135 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
136 *
137 * @param a_Name The function name.
138 */
139
140
141#if defined(__GNUC__) && defined(RT_ARCH_X86)
142typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
143# define FNIEMOP_DEF(a_Name) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
145# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
147# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
148 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
149
150#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
151typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
152# define FNIEMOP_DEF(a_Name) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
156# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
157 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
158
159#elif defined(__GNUC__)
160typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#else
169typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
170# define FNIEMOP_DEF(a_Name) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
174# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
175 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
176
177#endif
178
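/* Example (illustrative): what an opcode decoder function defined with these
   macros looks like.  The handler name and body are hypothetical; the real
   handlers are defined elsewhere in IEM:

        FNIEMOP_DEF_1(iemOp_Example, uint8_t, bRm)
        {
            NOREF(bRm);
            return VINF_SUCCESS;
        }

   Such a function is invoked through the matching FNIEMOP_CALL_1 macro
   defined further down, so parameters and calling convention stay in one
   place. */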
179
180/**
181 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
182 */
183typedef union IEMSELDESC
184{
185 /** The legacy view. */
186 X86DESC Legacy;
187 /** The long mode view. */
188 X86DESC64 Long;
189} IEMSELDESC;
190/** Pointer to a selector descriptor table entry. */
191typedef IEMSELDESC *PIEMSELDESC;
192
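/* Example (illustrative): inspecting a fetched descriptor through the legacy
   view.  The Gen.u1Present / Gen.u2Dpl field names assume the X86DESC layout
   from iprt/x86.h:

        IEMSELDESC Desc;
        VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        if (!Desc.Legacy.Gen.u1Present)
            return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
        uint8_t const uDpl = Desc.Legacy.Gen.u2Dpl;
 */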
193
194/*********************************************************************************************************************************
195* Defined Constants And Macros *
196*********************************************************************************************************************************/
197/** Temporary hack to disable the double execution. Will be removed in favor
198 * of a dedicated execution mode in EM. */
199//#define IEM_VERIFICATION_MODE_NO_REM
200
201/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
202 * due to GCC lacking knowledge about the value range of a switch. */
203#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
204
205/**
206 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
207 * occasion.
208 */
209#ifdef LOG_ENABLED
210# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
211 do { \
212 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
213 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
214 } while (0)
215#else
216# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
217 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
218#endif
219
220/**
221 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
222 * occasion using the supplied logger statement.
223 *
224 * @param a_LoggerArgs What to log on failure.
225 */
226#ifdef LOG_ENABLED
227# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
228 do { \
229 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
230 /*LogFunc(a_LoggerArgs);*/ \
231 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
232 } while (0)
233#else
234# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
235 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
236#endif
237
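/* Example (illustrative): typical use inside an instruction implementation
   when hitting a case IEM does not handle yet; the condition shown is made
   up for illustration:

        if (IEM_IS_LONG_MODE(pIemCpu))
            IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("long mode variant not implemented\n"));
 */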
238/**
239 * Call an opcode decoder function.
240 *
241 * We're using macros for this so that adding and removing parameters can be
242 * done as we please. See FNIEMOP_DEF.
243 */
244#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
245
246/**
247 * Call a common opcode decoder function taking one extra argument.
248 *
249 * We're using macros for this so that adding and removing parameters can be
250 * done as we please. See FNIEMOP_DEF_1.
251 */
252#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
253
254/**
255 * Call a common opcode decoder function taking two extra arguments.
256 *
257 * We're using macros for this so that adding and removing parameters can be
258 * done as we please. See FNIEMOP_DEF_2.
259 */
260#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
261
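/* Example (illustrative): the dispatch pattern these call macros exist for.
   A decoder fetches the next opcode byte and hands it to the entry of a
   256-entry handler map (see g_apfnOneByteMap below); fetch errors are
   returned by the fetch macro itself:

        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */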
262/**
263 * Check if we're currently executing in real or virtual 8086 mode.
264 *
265 * @returns @c true if it is, @c false if not.
266 * @param a_pIemCpu The IEM state of the current CPU.
267 */
268#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
269
270/**
271 * Check if we're currently executing in virtual 8086 mode.
272 *
273 * @returns @c true if it is, @c false if not.
274 * @param a_pIemCpu The IEM state of the current CPU.
275 */
276#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
277
278/**
279 * Check if we're currently executing in long mode.
280 *
281 * @returns @c true if it is, @c false if not.
282 * @param a_pIemCpu The IEM state of the current CPU.
283 */
284#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
285
286/**
287 * Check if we're currently executing in real mode.
288 *
289 * @returns @c true if it is, @c false if not.
290 * @param a_pIemCpu The IEM state of the current CPU.
291 */
292#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
293
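/* Example (illustrative): these predicates are typically used as guards at
   the top of an instruction implementation, e.g. raising \#GP(0) when the
   instruction is not valid outside protected mode (the exact rule here is
   made up for illustration):

        if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
            return iemRaiseGeneralProtectionFault0(pIemCpu);
 */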
294/**
295 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
296 * @returns PCCPUMFEATURES
297 * @param a_pIemCpu The IEM state of the current CPU.
298 */
299#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
300
301/**
302 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
303 * @returns PCCPUMFEATURES
304 * @param a_pIemCpu The IEM state of the current CPU.
305 */
306#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
307
308/**
309 * Evaluates to true if we're presenting an Intel CPU to the guest.
310 */
311#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
312
313/**
314 * Evaluates to true if we're presenting an AMD CPU to the guest.
315 */
316#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
317
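/* Example (illustrative): feature and vendor gates combine with the macros
   above.  The fMmx member name assumes the CPUMFEATURES layout in cpum.h and
   the handler snippet is hypothetical:

        if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx)
            ...treat the encoding as an invalid opcode...
        if (IEM_IS_GUEST_CPU_AMD(pIemCpu))
            ...follow the AMD flavour of the behaviour...
 */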
318/**
319 * Check if the address is canonical.
320 */
321#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
322
323
324/*********************************************************************************************************************************
325* Global Variables *
326*********************************************************************************************************************************/
327extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
328
329
330/** Function table for the ADD instruction. */
331IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
332{
333 iemAImpl_add_u8, iemAImpl_add_u8_locked,
334 iemAImpl_add_u16, iemAImpl_add_u16_locked,
335 iemAImpl_add_u32, iemAImpl_add_u32_locked,
336 iemAImpl_add_u64, iemAImpl_add_u64_locked
337};
338
339/** Function table for the ADC instruction. */
340IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
341{
342 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
343 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
344 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
345 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
346};
347
348/** Function table for the SUB instruction. */
349IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
350{
351 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
352 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
353 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
354 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
355};
356
357/** Function table for the SBB instruction. */
358IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
359{
360 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
361 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
362 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
363 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
364};
365
366/** Function table for the OR instruction. */
367IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
368{
369 iemAImpl_or_u8, iemAImpl_or_u8_locked,
370 iemAImpl_or_u16, iemAImpl_or_u16_locked,
371 iemAImpl_or_u32, iemAImpl_or_u32_locked,
372 iemAImpl_or_u64, iemAImpl_or_u64_locked
373};
374
375/** Function table for the XOR instruction. */
376IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
377{
378 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
379 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
380 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
381 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
382};
383
384/** Function table for the AND instruction. */
385IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
386{
387 iemAImpl_and_u8, iemAImpl_and_u8_locked,
388 iemAImpl_and_u16, iemAImpl_and_u16_locked,
389 iemAImpl_and_u32, iemAImpl_and_u32_locked,
390 iemAImpl_and_u64, iemAImpl_and_u64_locked
391};
392
393/** Function table for the CMP instruction.
394 * @remarks Making operand order ASSUMPTIONS.
395 */
396IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
397{
398 iemAImpl_cmp_u8, NULL,
399 iemAImpl_cmp_u16, NULL,
400 iemAImpl_cmp_u32, NULL,
401 iemAImpl_cmp_u64, NULL
402};
403
404/** Function table for the TEST instruction.
405 * @remarks Making operand order ASSUMPTIONS.
406 */
407IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
408{
409 iemAImpl_test_u8, NULL,
410 iemAImpl_test_u16, NULL,
411 iemAImpl_test_u32, NULL,
412 iemAImpl_test_u64, NULL
413};
414
415/** Function table for the BT instruction. */
416IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
417{
418 NULL, NULL,
419 iemAImpl_bt_u16, NULL,
420 iemAImpl_bt_u32, NULL,
421 iemAImpl_bt_u64, NULL
422};
423
424/** Function table for the BTC instruction. */
425IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
426{
427 NULL, NULL,
428 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
429 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
430 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
431};
432
433/** Function table for the BTR instruction. */
434IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
435{
436 NULL, NULL,
437 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
438 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
439 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
440};
441
442/** Function table for the BTS instruction. */
443IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
444{
445 NULL, NULL,
446 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
447 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
448 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
449};
450
451/** Function table for the BSF instruction. */
452IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
453{
454 NULL, NULL,
455 iemAImpl_bsf_u16, NULL,
456 iemAImpl_bsf_u32, NULL,
457 iemAImpl_bsf_u64, NULL
458};
459
460/** Function table for the BSR instruction. */
461IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
462{
463 NULL, NULL,
464 iemAImpl_bsr_u16, NULL,
465 iemAImpl_bsr_u32, NULL,
466 iemAImpl_bsr_u64, NULL
467};
468
469/** Function table for the IMUL instruction. */
470IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
471{
472 NULL, NULL,
473 iemAImpl_imul_two_u16, NULL,
474 iemAImpl_imul_two_u32, NULL,
475 iemAImpl_imul_two_u64, NULL
476};
477
478/** Group 1 /r lookup table. */
479IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
480{
481 &g_iemAImpl_add,
482 &g_iemAImpl_or,
483 &g_iemAImpl_adc,
484 &g_iemAImpl_sbb,
485 &g_iemAImpl_and,
486 &g_iemAImpl_sub,
487 &g_iemAImpl_xor,
488 &g_iemAImpl_cmp
489};
490
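/* Example (illustrative): how a group table like this is consumed by a
   decoder.  The ModR/M reg field (bits 3 through 5) selects the operation,
   and the operand size then selects the worker within the chosen entry;
   the variable names are made up:

        PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 */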
491/** Function table for the INC instruction. */
492IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
493{
494 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
495 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
496 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
497 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
498};
499
500/** Function table for the DEC instruction. */
501IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
502{
503 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
504 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
505 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
506 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
507};
508
509/** Function table for the NEG instruction. */
510IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
511{
512 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
513 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
514 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
515 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
516};
517
518/** Function table for the NOT instruction. */
519IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
520{
521 iemAImpl_not_u8, iemAImpl_not_u8_locked,
522 iemAImpl_not_u16, iemAImpl_not_u16_locked,
523 iemAImpl_not_u32, iemAImpl_not_u32_locked,
524 iemAImpl_not_u64, iemAImpl_not_u64_locked
525};
526
527
528/** Function table for the ROL instruction. */
529IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
530{
531 iemAImpl_rol_u8,
532 iemAImpl_rol_u16,
533 iemAImpl_rol_u32,
534 iemAImpl_rol_u64
535};
536
537/** Function table for the ROR instruction. */
538IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
539{
540 iemAImpl_ror_u8,
541 iemAImpl_ror_u16,
542 iemAImpl_ror_u32,
543 iemAImpl_ror_u64
544};
545
546/** Function table for the RCL instruction. */
547IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
548{
549 iemAImpl_rcl_u8,
550 iemAImpl_rcl_u16,
551 iemAImpl_rcl_u32,
552 iemAImpl_rcl_u64
553};
554
555/** Function table for the RCR instruction. */
556IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
557{
558 iemAImpl_rcr_u8,
559 iemAImpl_rcr_u16,
560 iemAImpl_rcr_u32,
561 iemAImpl_rcr_u64
562};
563
564/** Function table for the SHL instruction. */
565IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
566{
567 iemAImpl_shl_u8,
568 iemAImpl_shl_u16,
569 iemAImpl_shl_u32,
570 iemAImpl_shl_u64
571};
572
573/** Function table for the SHR instruction. */
574IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
575{
576 iemAImpl_shr_u8,
577 iemAImpl_shr_u16,
578 iemAImpl_shr_u32,
579 iemAImpl_shr_u64
580};
581
582/** Function table for the SAR instruction. */
583IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
584{
585 iemAImpl_sar_u8,
586 iemAImpl_sar_u16,
587 iemAImpl_sar_u32,
588 iemAImpl_sar_u64
589};
590
591
592/** Function table for the MUL instruction. */
593IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
594{
595 iemAImpl_mul_u8,
596 iemAImpl_mul_u16,
597 iemAImpl_mul_u32,
598 iemAImpl_mul_u64
599};
600
601/** Function table for the IMUL instruction working implicitly on rAX. */
602IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
603{
604 iemAImpl_imul_u8,
605 iemAImpl_imul_u16,
606 iemAImpl_imul_u32,
607 iemAImpl_imul_u64
608};
609
610/** Function table for the DIV instruction. */
611IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
612{
613 iemAImpl_div_u8,
614 iemAImpl_div_u16,
615 iemAImpl_div_u32,
616 iemAImpl_div_u64
617};
618
619/** Function table for the IDIV instruction. */
620IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
621{
622 iemAImpl_idiv_u8,
623 iemAImpl_idiv_u16,
624 iemAImpl_idiv_u32,
625 iemAImpl_idiv_u64
626};
627
628/** Function table for the SHLD instruction */
629IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
630{
631 iemAImpl_shld_u16,
632 iemAImpl_shld_u32,
633 iemAImpl_shld_u64,
634};
635
636/** Function table for the SHRD instruction */
637IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
638{
639 iemAImpl_shrd_u16,
640 iemAImpl_shrd_u32,
641 iemAImpl_shrd_u64,
642};
643
644
645/** Function table for the PUNPCKLBW instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
647/** Function table for the PUNPCKLWD instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
649/** Function table for the PUNPCKLDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
651/** Function table for the PUNPCKLQDQ instruction */
652IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
653
654/** Function table for the PUNPCKHBW instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
656/** Function table for the PUNPCKHWD instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
658/** Function table for the PUNPCKHDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
660/** Function table for the PUNPCKHQDQ instruction */
661IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
662
663/** Function table for the PXOR instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
665/** Function table for the PCMPEQB instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
667/** Function table for the PCMPEQW instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
669/** Function table for the PCMPEQD instruction */
670IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
671
672
673#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
674/** What IEM just wrote. */
675uint8_t g_abIemWrote[256];
676/** How much IEM just wrote. */
677size_t g_cbIemWrote;
678#endif
679
680
681/*********************************************************************************************************************************
682* Internal Functions *
683*********************************************************************************************************************************/
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
686IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
687IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
689IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
692IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
694IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
695IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
698IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
699IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
700IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
701IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
702IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
703IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
709IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
710IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
713IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
714IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
715IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
716IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
717
718#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
719IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
720#endif
721IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
722IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
723
724
725
726/**
727 * Sets the pass up status.
728 *
729 * @returns VINF_SUCCESS.
730 * @param pIemCpu The per CPU IEM state of the calling thread.
731 * @param rcPassUp The pass up status. Must be informational.
732 * VINF_SUCCESS is not allowed.
733 */
734IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
735{
736 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
737
738 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
739 if (rcOldPassUp == VINF_SUCCESS)
740 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
741 /* If both are EM scheduling codes, use EM priority rules. */
742 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
743 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
744 {
745 if (rcPassUp < rcOldPassUp)
746 {
747 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
748 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
749 }
750 else
751 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
752 }
753 /* Override EM scheduling with specific status code. */
754 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
755 {
756 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
757 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
758 }
759 /* Don't override specific status code, first come first served. */
760 else
761 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
762 return VINF_SUCCESS;
763}
764
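/* Example (illustrative): the usual call-site pattern, also used by the
   opcode prefetching code below.  An informational status from a physical
   access is recorded as the pass-up status and execution continues as if
   the access had fully succeeded:

        if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
 */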
765
766/**
767 * Calculates the CPU mode.
768 *
769 * This is mainly for updating IEMCPU::enmCpuMode.
770 *
771 * @returns CPU mode.
772 * @param pCtx The register context for the CPU.
773 */
774DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
775{
776 if (CPUMIsGuestIn64BitCodeEx(pCtx))
777 return IEMMODE_64BIT;
778 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
779 return IEMMODE_32BIT;
780 return IEMMODE_16BIT;
781}
782
783
784/**
785 * Initializes the execution state.
786 *
787 * @param pIemCpu The per CPU IEM state.
788 * @param fBypassHandlers Whether to bypass access handlers.
789 *
790 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
791 * side-effects in strict builds.
792 */
793DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
794{
795 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
796 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
797
798 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
799
800#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
801 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
808 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
809#endif
810
811#ifdef VBOX_WITH_RAW_MODE_NOT_R0
812 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
813#endif
814 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
815 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
816#ifdef VBOX_STRICT
817 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
818 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
819 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
820 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
821 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
822 pIemCpu->uRexReg = 127;
823 pIemCpu->uRexB = 127;
824 pIemCpu->uRexIndex = 127;
825 pIemCpu->iEffSeg = 127;
826 pIemCpu->offOpcode = 127;
827 pIemCpu->cbOpcode = 127;
828#endif
829
830 pIemCpu->cActiveMappings = 0;
831 pIemCpu->iNextMapping = 0;
832 pIemCpu->rcPassUp = VINF_SUCCESS;
833 pIemCpu->fBypassHandlers = fBypassHandlers;
834#ifdef VBOX_WITH_RAW_MODE_NOT_R0
835 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
836 && pCtx->cs.u64Base == 0
837 && pCtx->cs.u32Limit == UINT32_MAX
838 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
839 if (!pIemCpu->fInPatchCode)
840 CPUMRawLeave(pVCpu, VINF_SUCCESS);
841#endif
842
843#ifdef IEM_VERIFICATION_MODE_FULL
844 pIemCpu->fNoRemSavedByExec = pIemCpu->fNoRem;
845 pIemCpu->fNoRem = true;
846#endif
847}
848
849
850/**
851 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
852 *
853 * @param pIemCpu The per CPU IEM state.
854 */
855DECLINLINE(void) iemUninitExec(PIEMCPU pIemCpu)
856{
857 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
858#ifdef IEM_VERIFICATION_MODE_FULL
859 pIemCpu->fNoRem = pIemCpu->fNoRemSavedByExec;
860#endif
861#ifdef VBOX_STRICT
862 pIemCpu->cbOpcode = 0;
863#else
864 NOREF(pIemCpu);
865#endif
866}
867
868
869/**
870 * Initializes the decoder state.
871 *
872 * @param pIemCpu The per CPU IEM state.
873 * @param fBypassHandlers Whether to bypass access handlers.
874 */
875DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
876{
877 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
878 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
879
880 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
881
882#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
883 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
885 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
886 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
887 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
888 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
889 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
890 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
891#endif
892
893#ifdef VBOX_WITH_RAW_MODE_NOT_R0
894 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
895#endif
896 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
897#ifdef IEM_VERIFICATION_MODE_FULL
898 if (pIemCpu->uInjectCpl != UINT8_MAX)
899 pIemCpu->uCpl = pIemCpu->uInjectCpl;
900#endif
901 IEMMODE enmMode = iemCalcCpuMode(pCtx);
902 pIemCpu->enmCpuMode = enmMode;
903 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
904 pIemCpu->enmEffAddrMode = enmMode;
905 if (enmMode != IEMMODE_64BIT)
906 {
907 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
908 pIemCpu->enmEffOpSize = enmMode;
909 }
910 else
911 {
912 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
913 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
914 }
915 pIemCpu->fPrefixes = 0;
916 pIemCpu->uRexReg = 0;
917 pIemCpu->uRexB = 0;
918 pIemCpu->uRexIndex = 0;
919 pIemCpu->iEffSeg = X86_SREG_DS;
920 pIemCpu->offOpcode = 0;
921 pIemCpu->cbOpcode = 0;
922 pIemCpu->cActiveMappings = 0;
923 pIemCpu->iNextMapping = 0;
924 pIemCpu->rcPassUp = VINF_SUCCESS;
925 pIemCpu->fBypassHandlers = fBypassHandlers;
926#ifdef VBOX_WITH_RAW_MODE_NOT_R0
927 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
928 && pCtx->cs.u64Base == 0
929 && pCtx->cs.u32Limit == UINT32_MAX
930 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
931 if (!pIemCpu->fInPatchCode)
932 CPUMRawLeave(pVCpu, VINF_SUCCESS);
933#endif
934
935#ifdef DBGFTRACE_ENABLED
936 switch (enmMode)
937 {
938 case IEMMODE_64BIT:
939 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
940 break;
941 case IEMMODE_32BIT:
942 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
943 break;
944 case IEMMODE_16BIT:
945 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
946 break;
947 }
948#endif
949}
950
951
952/**
953 * Prefetches opcodes the first time when starting to execute.
954 *
955 * @returns Strict VBox status code.
956 * @param pIemCpu The IEM state.
957 * @param fBypassHandlers Whether to bypass access handlers.
958 */
959IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
960{
961#ifdef IEM_VERIFICATION_MODE_FULL
962 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
963#endif
964 iemInitDecoder(pIemCpu, fBypassHandlers);
965
966 /*
967 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
968 *
969 * First translate CS:rIP to a physical address.
970 */
971 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
972 uint32_t cbToTryRead;
973 RTGCPTR GCPtrPC;
974 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
975 {
976 cbToTryRead = PAGE_SIZE;
977 GCPtrPC = pCtx->rip;
978 if (!IEM_IS_CANONICAL(GCPtrPC))
979 return iemRaiseGeneralProtectionFault0(pIemCpu);
980 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
981 }
982 else
983 {
984 uint32_t GCPtrPC32 = pCtx->eip;
985 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
986 if (GCPtrPC32 > pCtx->cs.u32Limit)
987 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
988 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
989 if (!cbToTryRead) /* overflowed */
990 {
991 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
992 cbToTryRead = UINT32_MAX;
993 }
994 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
995 Assert(GCPtrPC <= UINT32_MAX);
996 }
997
998#ifdef VBOX_WITH_RAW_MODE_NOT_R0
999 /* Allow interpretation of patch manager code blocks since they can for
1000 instance throw #PFs for perfectly good reasons. */
1001 if (pIemCpu->fInPatchCode)
1002 {
1003 size_t cbRead = 0;
1004 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
1005 AssertRCReturn(rc, rc);
1006 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1007 return VINF_SUCCESS;
1008 }
1009#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1010
1011 RTGCPHYS GCPhys;
1012 uint64_t fFlags;
1013 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
1014 if (RT_FAILURE(rc))
1015 {
1016 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1017 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1018 }
1019 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1020 {
1021 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1022 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1023 }
1024 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1025 {
1026 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1027 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1028 }
1029 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1030 /** @todo Check reserved bits and such stuff. PGM is better at doing
1031 * that, so do it when implementing the guest virtual address
1032 * TLB... */
1033
1034#ifdef IEM_VERIFICATION_MODE_FULL
1035 /*
1036 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1037 * instruction.
1038 */
1039 /** @todo optimize this differently by not using PGMPhysRead. */
1040 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1041 pIemCpu->GCPhysOpcodes = GCPhys;
1042 if ( offPrevOpcodes < cbOldOpcodes
1043 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1044 {
1045 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1046 Assert(cbNew <= RT_ELEMENTS(pIemCpu->abOpcode));
1047 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1048 pIemCpu->cbOpcode = cbNew;
1049 return VINF_SUCCESS;
1050 }
1051#endif
1052
1053 /*
1054 * Read the bytes at this address.
1055 */
1056 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1057#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1058 size_t cbActual;
1059 if ( PATMIsEnabled(pVM)
1060 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1061 {
1062 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1063 Assert(cbActual > 0);
1064 pIemCpu->cbOpcode = (uint8_t)cbActual;
1065 }
1066 else
1067#endif
1068 {
1069 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1070 if (cbToTryRead > cbLeftOnPage)
1071 cbToTryRead = cbLeftOnPage;
1072 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1073 cbToTryRead = sizeof(pIemCpu->abOpcode);
1074
1075 if (!pIemCpu->fBypassHandlers)
1076 {
1077 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1078 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1079 { /* likely */ }
1080 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1081 {
1082 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1083 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1084 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1085 }
1086 else
1087 {
1088 Log((RT_SUCCESS(rcStrict)
1089 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1090 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1091 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1092 return rcStrict;
1093 }
1094 }
1095 else
1096 {
1097 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1098 if (RT_SUCCESS(rc))
1099 { /* likely */ }
1100 else
1101 {
1102 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1103 GCPtrPC, GCPhys, cbToTryRead, rc));
1104 return rc;
1105 }
1106 }
1107 pIemCpu->cbOpcode = cbToTryRead;
1108 }
1109
1110 return VINF_SUCCESS;
1111}
1112
1113
1114/**
1115 * Tries to fetch at least @a cbMin bytes more opcodes, raising the appropriate
1116 * exception if it fails.
1117 *
1118 * @returns Strict VBox status code.
1119 * @param pIemCpu The IEM state.
1120 * @param cbMin The minimum number of bytes relative to offOpcode
1121 * that must be read.
1122 */
1123IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1124{
1125 /*
1126 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1127 *
1128 * First translate CS:rIP to a physical address.
1129 */
1130 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1131 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1132 uint32_t cbToTryRead;
1133 RTGCPTR GCPtrNext;
1134 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1135 {
1136 cbToTryRead = PAGE_SIZE;
1137 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1138 if (!IEM_IS_CANONICAL(GCPtrNext))
1139 return iemRaiseGeneralProtectionFault0(pIemCpu);
1140 }
1141 else
1142 {
1143 uint32_t GCPtrNext32 = pCtx->eip;
1144 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1145 GCPtrNext32 += pIemCpu->cbOpcode;
1146 if (GCPtrNext32 > pCtx->cs.u32Limit)
1147 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1148 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1149 if (!cbToTryRead) /* overflowed */
1150 {
1151 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1152 cbToTryRead = UINT32_MAX;
1153 /** @todo check out wrapping around the code segment. */
1154 }
1155 if (cbToTryRead < cbMin - cbLeft)
1156 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1157 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1158 }
1159
1160 /* Only read up to the end of the page, and make sure we don't read more
1161 than the opcode buffer can hold. */
1162 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1163 if (cbToTryRead > cbLeftOnPage)
1164 cbToTryRead = cbLeftOnPage;
1165 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1166 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1167/** @todo r=bird: Convert assertion into undefined opcode exception? */
1168 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1169
1170#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1171 /* Allow interpretation of patch manager code blocks since they can for
1172 instance throw #PFs for perfectly good reasons. */
1173 if (pIemCpu->fInPatchCode)
1174 {
1175 size_t cbRead = 0;
1176 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1177 AssertRCReturn(rc, rc);
1178 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1179 return VINF_SUCCESS;
1180 }
1181#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1182
1183 RTGCPHYS GCPhys;
1184 uint64_t fFlags;
1185 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1186 if (RT_FAILURE(rc))
1187 {
1188 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1189 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1190 }
1191 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1192 {
1193 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1194 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1195 }
1196 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1197 {
1198 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1199 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1200 }
1201 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1202 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1203 /** @todo Check reserved bits and such stuff. PGM is better at doing
1204 * that, so do it when implementing the guest virtual address
1205 * TLB... */
1206
1207 /*
1208 * Read the bytes at this address.
1209 *
1210 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1211 * and since PATM should only patch the start of an instruction there
1212 * should be no need to check again here.
1213 */
1214 if (!pIemCpu->fBypassHandlers)
1215 {
1216 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1217 cbToTryRead, PGMACCESSORIGIN_IEM);
1218 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1219 { /* likely */ }
1220 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1221 {
1222 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1223 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1224 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1225 }
1226 else
1227 {
1228 Log((RT_SUCCESS(rcStrict)
1229 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1230 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1231 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1232 return rcStrict;
1233 }
1234 }
1235 else
1236 {
1237 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1238 if (RT_SUCCESS(rc))
1239 { /* likely */ }
1240 else
1241 {
1242 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1243 return rc;
1244 }
1245 }
1246 pIemCpu->cbOpcode += cbToTryRead;
1247 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1248
1249 return VINF_SUCCESS;
1250}
1251
1252
1253/**
1254 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1255 *
1256 * @returns Strict VBox status code.
1257 * @param pIemCpu The IEM state.
1258 * @param pb Where to return the opcode byte.
1259 */
1260DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1261{
1262 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1263 if (rcStrict == VINF_SUCCESS)
1264 {
1265 uint8_t offOpcode = pIemCpu->offOpcode;
1266 *pb = pIemCpu->abOpcode[offOpcode];
1267 pIemCpu->offOpcode = offOpcode + 1;
1268 }
1269 else
1270 *pb = 0;
1271 return rcStrict;
1272}
1273
1274
1275/**
1276 * Fetches the next opcode byte.
1277 *
1278 * @returns Strict VBox status code.
1279 * @param pIemCpu The IEM state.
1280 * @param pu8 Where to return the opcode byte.
1281 */
1282DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1283{
1284 uint8_t const offOpcode = pIemCpu->offOpcode;
1285 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1286 {
1287 *pu8 = pIemCpu->abOpcode[offOpcode];
1288 pIemCpu->offOpcode = offOpcode + 1;
1289 return VINF_SUCCESS;
1290 }
1291 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1292}
1293
1294
1295/**
1296 * Fetches the next opcode byte, returns automatically on failure.
1297 *
1298 * @param a_pu8 Where to return the opcode byte.
1299 * @remark Implicitly references pIemCpu.
1300 */
1301#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1302 do \
1303 { \
1304 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1305 if (rcStrict2 != VINF_SUCCESS) \
1306 return rcStrict2; \
1307 } while (0)
1308
1309
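/* Example (illustrative): how a decoder consumes opcode bytes with this
   macro; a fetch failure makes the macro return from the calling handler
   with the strict status code.  The ModR/M handling shown is only a sketch:

        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
        if ((bRm & 0xc0) == 0xc0)
            ...register operand form...
        else
            ...memory operand form, decode the effective address...
 */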
1310/**
1311 * Fetches the next signed byte from the opcode stream.
1312 *
1313 * @returns Strict VBox status code.
1314 * @param pIemCpu The IEM state.
1315 * @param pi8 Where to return the signed byte.
1316 */
1317DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1318{
1319 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1320}
1321
1322
1323/**
1324 * Fetches the next signed byte from the opcode stream, returning automatically
1325 * on failure.
1326 *
1327 * @param a_pi8 Where to return the signed byte.
1328 * @remark Implicitly references pIemCpu.
1329 */
1330#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1331 do \
1332 { \
1333 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1334 if (rcStrict2 != VINF_SUCCESS) \
1335 return rcStrict2; \
1336 } while (0)
1337
1338
1339/**
1340 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1341 *
1342 * @returns Strict VBox status code.
1343 * @param pIemCpu The IEM state.
1344 * @param pu16 Where to return the opcode word.
1345 */
1346DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1347{
1348 uint8_t u8;
1349 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1350 if (rcStrict == VINF_SUCCESS)
1351 *pu16 = (int8_t)u8;
1352 return rcStrict;
1353}
1354
1355
1356/**
1357 * Fetches the next signed byte from the opcode stream, extending it to
1358 * unsigned 16-bit.
1359 *
1360 * @returns Strict VBox status code.
1361 * @param pIemCpu The IEM state.
1362 * @param pu16 Where to return the unsigned word.
1363 */
1364DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1365{
1366 uint8_t const offOpcode = pIemCpu->offOpcode;
1367 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1368 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1369
1370 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1371 pIemCpu->offOpcode = offOpcode + 1;
1372 return VINF_SUCCESS;
1373}
1374
1375
1376/**
1377 * Fetches the next signed byte from the opcode stream, sign-extending it to
1378 * a word, returning automatically on failure.
1379 *
1380 * @param a_pu16 Where to return the word.
1381 * @remark Implicitly references pIemCpu.
1382 */
1383#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1384 do \
1385 { \
1386 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1387 if (rcStrict2 != VINF_SUCCESS) \
1388 return rcStrict2; \
1389 } while (0)
1390
1391
1392/**
1393 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1394 *
1395 * @returns Strict VBox status code.
1396 * @param pIemCpu The IEM state.
1397 * @param pu32 Where to return the opcode dword.
1398 */
1399DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1400{
1401 uint8_t u8;
1402 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1403 if (rcStrict == VINF_SUCCESS)
1404 *pu32 = (int8_t)u8;
1405 return rcStrict;
1406}
1407
1408
1409/**
1410 * Fetches the next signed byte from the opcode stream, extending it to
1411 * unsigned 32-bit.
1412 *
1413 * @returns Strict VBox status code.
1414 * @param pIemCpu The IEM state.
1415 * @param pu32 Where to return the unsigned dword.
1416 */
1417DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1418{
1419 uint8_t const offOpcode = pIemCpu->offOpcode;
1420 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1421 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1422
1423 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1424 pIemCpu->offOpcode = offOpcode + 1;
1425 return VINF_SUCCESS;
1426}
1427
1428
1429/**
1430 * Fetches the next signed byte from the opcode stream, sign-extending it to
1431 * a double word, returning automatically on failure.
1432 *
1433 * @param a_pu32 Where to return the double word.
1434 * @remark Implicitly references pIemCpu.
1435 */
1436#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1437 do \
1438 { \
1439 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1440 if (rcStrict2 != VINF_SUCCESS) \
1441 return rcStrict2; \
1442 } while (0)
1443
1444
1445/**
1446 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1447 *
1448 * @returns Strict VBox status code.
1449 * @param pIemCpu The IEM state.
1450 * @param pu64 Where to return the opcode qword.
1451 */
1452DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1453{
1454 uint8_t u8;
1455 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1456 if (rcStrict == VINF_SUCCESS)
1457 *pu64 = (int8_t)u8;
1458 return rcStrict;
1459}
1460
1461
1462/**
1463 * Fetches the next signed byte from the opcode stream, extending it to
1464 * unsigned 64-bit.
1465 *
1466 * @returns Strict VBox status code.
1467 * @param pIemCpu The IEM state.
1468 * @param pu64 Where to return the unsigned qword.
1469 */
1470DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1471{
1472 uint8_t const offOpcode = pIemCpu->offOpcode;
1473 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1474 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1475
1476 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1477 pIemCpu->offOpcode = offOpcode + 1;
1478 return VINF_SUCCESS;
1479}
1480
1481
1482/**
1483 * Fetches the next signed byte from the opcode stream, sign-extending it to
1484 * a quad word, returning automatically on failure.
1485 *
1486 * @param a_pu64 Where to return the quad word.
1487 * @remark Implicitly references pIemCpu.
1488 */
1489#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1490 do \
1491 { \
1492 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1493 if (rcStrict2 != VINF_SUCCESS) \
1494 return rcStrict2; \
1495 } while (0)
1496
1497
1498/**
1499 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1500 *
1501 * @returns Strict VBox status code.
1502 * @param pIemCpu The IEM state.
1503 * @param pu16 Where to return the opcode word.
1504 */
1505DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1506{
1507 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1508 if (rcStrict == VINF_SUCCESS)
1509 {
1510 uint8_t offOpcode = pIemCpu->offOpcode;
1511 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1512 pIemCpu->offOpcode = offOpcode + 2;
1513 }
1514 else
1515 *pu16 = 0;
1516 return rcStrict;
1517}
1518
1519
1520/**
1521 * Fetches the next opcode word.
1522 *
1523 * @returns Strict VBox status code.
1524 * @param pIemCpu The IEM state.
1525 * @param pu16 Where to return the opcode word.
1526 */
1527DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1528{
1529 uint8_t const offOpcode = pIemCpu->offOpcode;
1530 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1531 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1532
1533 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1534 pIemCpu->offOpcode = offOpcode + 2;
1535 return VINF_SUCCESS;
1536}
1537
1538
1539/**
1540 * Fetches the next opcode word, returns automatically on failure.
1541 *
1542 * @param a_pu16 Where to return the opcode word.
1543 * @remark Implicitly references pIemCpu.
1544 */
1545#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1546 do \
1547 { \
1548 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1549 if (rcStrict2 != VINF_SUCCESS) \
1550 return rcStrict2; \
1551 } while (0)
1552
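/* Illustrative usage (hypothetical helper, not a decoder function defined in
 * this file): the IEM_OPCODE_GET_NEXT_XXX macros return from the *calling*
 * function when the fetch fails, so they can only be used in functions that
 * themselves return VBOXSTRICTRC:
 *
 *     IEM_STATIC VBOXSTRICTRC iemOpExampleDecodeImm16(PIEMCPU pIemCpu, uint16_t *puImm)
 *     {
 *         IEM_OPCODE_GET_NEXT_U16(puImm);   <- returns rcStrict2 to the caller on failure.
 *         return VINF_SUCCESS;
 *     }
 */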
1553
1554/**
1555 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1556 *
1557 * @returns Strict VBox status code.
1558 * @param pIemCpu The IEM state.
1559 * @param pu32 Where to return the opcode double word.
1560 */
1561DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1562{
1563 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1564 if (rcStrict == VINF_SUCCESS)
1565 {
1566 uint8_t offOpcode = pIemCpu->offOpcode;
1567 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1568 pIemCpu->offOpcode = offOpcode + 2;
1569 }
1570 else
1571 *pu32 = 0;
1572 return rcStrict;
1573}
1574
1575
1576/**
1577 * Fetches the next opcode word, zero extending it to a double word.
1578 *
1579 * @returns Strict VBox status code.
1580 * @param pIemCpu The IEM state.
1581 * @param pu32 Where to return the opcode double word.
1582 */
1583DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1584{
1585 uint8_t const offOpcode = pIemCpu->offOpcode;
1586 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1587 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1588
1589 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1590 pIemCpu->offOpcode = offOpcode + 2;
1591 return VINF_SUCCESS;
1592}
1593
1594
1595/**
1596 * Fetches the next opcode word and zero extends it to a double word, returns
1597 * automatically on failure.
1598 *
1599 * @param a_pu32 Where to return the opcode double word.
1600 * @remark Implicitly references pIemCpu.
1601 */
1602#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1603 do \
1604 { \
1605 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1606 if (rcStrict2 != VINF_SUCCESS) \
1607 return rcStrict2; \
1608 } while (0)
1609
1610
1611/**
1612 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1613 *
1614 * @returns Strict VBox status code.
1615 * @param pIemCpu The IEM state.
1616 * @param pu64 Where to return the opcode quad word.
1617 */
1618DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1619{
1620 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1621 if (rcStrict == VINF_SUCCESS)
1622 {
1623 uint8_t offOpcode = pIemCpu->offOpcode;
1624 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1625 pIemCpu->offOpcode = offOpcode + 2;
1626 }
1627 else
1628 *pu64 = 0;
1629 return rcStrict;
1630}
1631
1632
1633/**
1634 * Fetches the next opcode word, zero extending it to a quad word.
1635 *
1636 * @returns Strict VBox status code.
1637 * @param pIemCpu The IEM state.
1638 * @param pu64 Where to return the opcode quad word.
1639 */
1640DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1641{
1642 uint8_t const offOpcode = pIemCpu->offOpcode;
1643 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1644 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1645
1646 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1647 pIemCpu->offOpcode = offOpcode + 2;
1648 return VINF_SUCCESS;
1649}
1650
1651
1652/**
1653 * Fetches the next opcode word and zero extends it to a quad word, returns
1654 * automatically on failure.
1655 *
1656 * @param a_pu64 Where to return the opcode quad word.
1657 * @remark Implicitly references pIemCpu.
1658 */
1659#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1660 do \
1661 { \
1662 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1663 if (rcStrict2 != VINF_SUCCESS) \
1664 return rcStrict2; \
1665 } while (0)
1666
1667
1668/**
1669 * Fetches the next signed word from the opcode stream.
1670 *
1671 * @returns Strict VBox status code.
1672 * @param pIemCpu The IEM state.
1673 * @param pi16 Where to return the signed word.
1674 */
1675DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1676{
1677 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1678}
1679
1680
1681/**
1682 * Fetches the next signed word from the opcode stream, returning automatically
1683 * on failure.
1684 *
1685 * @param a_pi16 Where to return the signed word.
1686 * @remark Implicitly references pIemCpu.
1687 */
1688#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1689 do \
1690 { \
1691 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1692 if (rcStrict2 != VINF_SUCCESS) \
1693 return rcStrict2; \
1694 } while (0)
1695
1696
1697/**
1698 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1699 *
1700 * @returns Strict VBox status code.
1701 * @param pIemCpu The IEM state.
1702 * @param pu32 Where to return the opcode dword.
1703 */
1704DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1705{
1706 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1707 if (rcStrict == VINF_SUCCESS)
1708 {
1709 uint8_t offOpcode = pIemCpu->offOpcode;
1710 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1711 pIemCpu->abOpcode[offOpcode + 1],
1712 pIemCpu->abOpcode[offOpcode + 2],
1713 pIemCpu->abOpcode[offOpcode + 3]);
1714 pIemCpu->offOpcode = offOpcode + 4;
1715 }
1716 else
1717 *pu32 = 0;
1718 return rcStrict;
1719}
1720
1721
1722/**
1723 * Fetches the next opcode dword.
1724 *
1725 * @returns Strict VBox status code.
1726 * @param pIemCpu The IEM state.
1727 * @param pu32 Where to return the opcode double word.
1728 */
1729DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1730{
1731 uint8_t const offOpcode = pIemCpu->offOpcode;
1732 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1733 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1734
1735 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1736 pIemCpu->abOpcode[offOpcode + 1],
1737 pIemCpu->abOpcode[offOpcode + 2],
1738 pIemCpu->abOpcode[offOpcode + 3]);
1739 pIemCpu->offOpcode = offOpcode + 4;
1740 return VINF_SUCCESS;
1741}
1742
1743
1744/**
1745 * Fetches the next opcode dword, returns automatically on failure.
1746 *
1747 * @param a_pu32 Where to return the opcode dword.
1748 * @remark Implicitly references pIemCpu.
1749 */
1750#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1751 do \
1752 { \
1753 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1754 if (rcStrict2 != VINF_SUCCESS) \
1755 return rcStrict2; \
1756 } while (0)
1757
1758
1759/**
1760 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1761 *
1762 * @returns Strict VBox status code.
1763 * @param pIemCpu The IEM state.
1764 * @param pu64 Where to return the opcode dword.
1765 */
1766DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1767{
1768 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1769 if (rcStrict == VINF_SUCCESS)
1770 {
1771 uint8_t offOpcode = pIemCpu->offOpcode;
1772 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1773 pIemCpu->abOpcode[offOpcode + 1],
1774 pIemCpu->abOpcode[offOpcode + 2],
1775 pIemCpu->abOpcode[offOpcode + 3]);
1776 pIemCpu->offOpcode = offOpcode + 4;
1777 }
1778 else
1779 *pu64 = 0;
1780 return rcStrict;
1781}
1782
1783
1784/**
1785 * Fetches the next opcode dword, zero extending it to a quad word.
1786 *
1787 * @returns Strict VBox status code.
1788 * @param pIemCpu The IEM state.
1789 * @param pu64 Where to return the opcode quad word.
1790 */
1791DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1792{
1793 uint8_t const offOpcode = pIemCpu->offOpcode;
1794 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1795 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1796
1797 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1798 pIemCpu->abOpcode[offOpcode + 1],
1799 pIemCpu->abOpcode[offOpcode + 2],
1800 pIemCpu->abOpcode[offOpcode + 3]);
1801 pIemCpu->offOpcode = offOpcode + 4;
1802 return VINF_SUCCESS;
1803}
1804
1805
1806/**
1807 * Fetches the next opcode dword and zero extends it to a quad word, returns
1808 * automatically on failure.
1809 *
1810 * @param a_pu64 Where to return the opcode quad word.
1811 * @remark Implicitly references pIemCpu.
1812 */
1813#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1814 do \
1815 { \
1816 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1817 if (rcStrict2 != VINF_SUCCESS) \
1818 return rcStrict2; \
1819 } while (0)
1820
1821
1822/**
1823 * Fetches the next signed double word from the opcode stream.
1824 *
1825 * @returns Strict VBox status code.
1826 * @param pIemCpu The IEM state.
1827 * @param pi32 Where to return the signed double word.
1828 */
1829DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1830{
1831 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1832}
1833
1834/**
1835 * Fetches the next signed double word from the opcode stream, returning
1836 * automatically on failure.
1837 *
1838 * @param a_pi32 Where to return the signed double word.
1839 * @remark Implicitly references pIemCpu.
1840 */
1841#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1842 do \
1843 { \
1844 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1845 if (rcStrict2 != VINF_SUCCESS) \
1846 return rcStrict2; \
1847 } while (0)
1848
1849
1850/**
1851 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1852 *
1853 * @returns Strict VBox status code.
1854 * @param pIemCpu The IEM state.
1855 * @param pu64 Where to return the opcode qword.
1856 */
1857DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1858{
1859 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1860 if (rcStrict == VINF_SUCCESS)
1861 {
1862 uint8_t offOpcode = pIemCpu->offOpcode;
1863 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1864 pIemCpu->abOpcode[offOpcode + 1],
1865 pIemCpu->abOpcode[offOpcode + 2],
1866 pIemCpu->abOpcode[offOpcode + 3]);
1867 pIemCpu->offOpcode = offOpcode + 4;
1868 }
1869 else
1870 *pu64 = 0;
1871 return rcStrict;
1872}
1873
1874
1875/**
1876 * Fetches the next opcode dword, sign extending it into a quad word.
1877 *
1878 * @returns Strict VBox status code.
1879 * @param pIemCpu The IEM state.
1880 * @param pu64 Where to return the opcode quad word.
1881 */
1882DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1883{
1884 uint8_t const offOpcode = pIemCpu->offOpcode;
1885 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1886 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1887
1888 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1889 pIemCpu->abOpcode[offOpcode + 1],
1890 pIemCpu->abOpcode[offOpcode + 2],
1891 pIemCpu->abOpcode[offOpcode + 3]);
1892 *pu64 = i32;
1893 pIemCpu->offOpcode = offOpcode + 4;
1894 return VINF_SUCCESS;
1895}
1896
1897
1898/**
1899 * Fetches the next opcode double word and sign extends it to a quad word,
1900 * returns automatically on failure.
1901 *
1902 * @param a_pu64 Where to return the opcode quad word.
1903 * @remark Implicitly references pIemCpu.
1904 */
1905#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1906 do \
1907 { \
1908 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1909 if (rcStrict2 != VINF_SUCCESS) \
1910 return rcStrict2; \
1911 } while (0)
1912
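/* The sign-extending dword fetcher above is the natural fit for 32-bit
 * displacements and most 32-bit immediates in 64-bit mode, which the
 * architecture defines as sign-extended to 64 bits; the zero-extending
 * variants further up serve operands that are architecturally zero-extended
 * instead. */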
1913
1914/**
1915 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1916 *
1917 * @returns Strict VBox status code.
1918 * @param pIemCpu The IEM state.
1919 * @param pu64 Where to return the opcode qword.
1920 */
1921DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1922{
1923 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1924 if (rcStrict == VINF_SUCCESS)
1925 {
1926 uint8_t offOpcode = pIemCpu->offOpcode;
1927 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1928 pIemCpu->abOpcode[offOpcode + 1],
1929 pIemCpu->abOpcode[offOpcode + 2],
1930 pIemCpu->abOpcode[offOpcode + 3],
1931 pIemCpu->abOpcode[offOpcode + 4],
1932 pIemCpu->abOpcode[offOpcode + 5],
1933 pIemCpu->abOpcode[offOpcode + 6],
1934 pIemCpu->abOpcode[offOpcode + 7]);
1935 pIemCpu->offOpcode = offOpcode + 8;
1936 }
1937 else
1938 *pu64 = 0;
1939 return rcStrict;
1940}
1941
1942
1943/**
1944 * Fetches the next opcode qword.
1945 *
1946 * @returns Strict VBox status code.
1947 * @param pIemCpu The IEM state.
1948 * @param pu64 Where to return the opcode qword.
1949 */
1950DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1951{
1952 uint8_t const offOpcode = pIemCpu->offOpcode;
1953 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1954 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1955
1956 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1957 pIemCpu->abOpcode[offOpcode + 1],
1958 pIemCpu->abOpcode[offOpcode + 2],
1959 pIemCpu->abOpcode[offOpcode + 3],
1960 pIemCpu->abOpcode[offOpcode + 4],
1961 pIemCpu->abOpcode[offOpcode + 5],
1962 pIemCpu->abOpcode[offOpcode + 6],
1963 pIemCpu->abOpcode[offOpcode + 7]);
1964 pIemCpu->offOpcode = offOpcode + 8;
1965 return VINF_SUCCESS;
1966}
1967
1968
1969/**
1970 * Fetches the next opcode quad word, returns automatically on failure.
1971 *
1972 * @param a_pu64 Where to return the opcode quad word.
1973 * @remark Implicitly references pIemCpu.
1974 */
1975#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1976 do \
1977 { \
1978 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1979 if (rcStrict2 != VINF_SUCCESS) \
1980 return rcStrict2; \
1981 } while (0)
1982
1983
1984/** @name Misc Worker Functions.
1985 * @{
1986 */
1987
1988
1989/**
1990 * Validates a new SS segment.
1991 *
1992 * @returns VBox strict status code.
1993 * @param pIemCpu The IEM per CPU instance data.
1994 * @param pCtx The CPU context.
1995 * @param NewSS The new SS selector.
1996 * @param uCpl The CPL to load the stack for.
1997 * @param pDesc Where to return the descriptor.
1998 */
1999IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
2000{
2001 NOREF(pCtx);
2002
2003 /* Null selectors are not allowed (we're not called for dispatching
2004 interrupts with SS=0 in long mode). */
2005 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2006 {
2007 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2008 return iemRaiseTaskSwitchFault0(pIemCpu);
2009 }
2010
2011 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2012 if ((NewSS & X86_SEL_RPL) != uCpl)
2013 {
2014 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2015 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2016 }
2017
2018 /*
2019 * Read the descriptor.
2020 */
2021 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
2022 if (rcStrict != VINF_SUCCESS)
2023 return rcStrict;
2024
2025 /*
2026 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2027 */
2028 if (!pDesc->Legacy.Gen.u1DescType)
2029 {
2030 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2031 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2032 }
2033
2034 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2035 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2036 {
2037 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2038 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2039 }
2040 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2041 {
2042 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2043 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2044 }
2045
2046 /* Is it there? */
2047 /** @todo testcase: Is this checked before the canonical / limit check below? */
2048 if (!pDesc->Legacy.Gen.u1Present)
2049 {
2050 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2051 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2052 }
2053
2054 return VINF_SUCCESS;
2055}
2056
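/* Note on the checks above: a null selector yields #TS(0); an RPL/CPL
 * mismatch, a system descriptor, a code or read-only data segment, or a
 * DPL/CPL mismatch yields #TS(sel); a not-present segment yields #NP(sel).
 * Only on VINF_SUCCESS is *pDesc valid for the caller. */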
2057
2058/**
2059 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2060 * not.
2061 *
2062 * @param a_pIemCpu The IEM per CPU data.
2063 * @param a_pCtx The CPU context.
2064 */
2065#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2066# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2067 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2068 ? (a_pCtx)->eflags.u \
2069 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2070#else
2071# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2072 ( (a_pCtx)->eflags.u )
2073#endif
2074
2075/**
2076 * Updates the EFLAGS in the correct manner wrt. PATM.
2077 *
2078 * @param a_pIemCpu The IEM per CPU data.
2079 * @param a_pCtx The CPU context.
2080 * @param a_fEfl The new EFLAGS.
2081 */
2082#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2083# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2084 do { \
2085 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2086 (a_pCtx)->eflags.u = (a_fEfl); \
2087 else \
2088 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2089 } while (0)
2090#else
2091# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2092 do { \
2093 (a_pCtx)->eflags.u = (a_fEfl); \
2094 } while (0)
2095#endif
2096
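/* Typical read-modify-write pattern using the two accessors above (the same
 * sequence the real-mode exception dispatcher further down uses):
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 *
 * Going through the accessors keeps the PATM-maintained EFLAGS bits consistent
 * when running in raw mode. */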
2097
2098/** @} */
2099
2100/** @name Raising Exceptions.
2101 *
2102 * @{
2103 */
2104
2105/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2106 * @{ */
2107/** CPU exception. */
2108#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2109/** External interrupt (from PIC, APIC, whatever). */
2110#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2111/** Software interrupt (int or into, not bound).
2112 * Returns to the following instruction. */
2113#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2114/** Takes an error code. */
2115#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2116/** Takes a CR2. */
2117#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2118/** Generated by the breakpoint instruction. */
2119#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2120/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2121#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2122/** @} */
2123
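/* These flags are OR'ed together when raising an event.  A page fault, for
 * instance, is raised with roughly
 *     IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
 * plus the error code and faulting address, while an external interrupt uses
 * just IEM_XCPT_FLAGS_T_EXT_INT. */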
2124
2125/**
2126 * Loads the specified stack far pointer from the TSS.
2127 *
2128 * @returns VBox strict status code.
2129 * @param pIemCpu The IEM per CPU instance data.
2130 * @param pCtx The CPU context.
2131 * @param uCpl The CPL to load the stack for.
2132 * @param pSelSS Where to return the new stack segment.
2133 * @param puEsp Where to return the new stack pointer.
2134 */
2135IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2136 PRTSEL pSelSS, uint32_t *puEsp)
2137{
2138 VBOXSTRICTRC rcStrict;
2139 Assert(uCpl < 4);
2140
2141 switch (pCtx->tr.Attr.n.u4Type)
2142 {
2143 /*
2144 * 16-bit TSS (X86TSS16).
2145 */
2146 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2147 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2148 {
2149 uint32_t off = uCpl * 4 + 2;
2150 if (off + 4 <= pCtx->tr.u32Limit)
2151 {
2152 /** @todo check actual access pattern here. */
2153 uint32_t u32Tmp = 0; /* gcc maybe... */
2154 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2155 if (rcStrict == VINF_SUCCESS)
2156 {
2157 *puEsp = RT_LOWORD(u32Tmp);
2158 *pSelSS = RT_HIWORD(u32Tmp);
2159 return VINF_SUCCESS;
2160 }
2161 }
2162 else
2163 {
2164 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2165 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2166 }
2167 break;
2168 }
2169
2170 /*
2171 * 32-bit TSS (X86TSS32).
2172 */
2173 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2174 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2175 {
2176 uint32_t off = uCpl * 8 + 4;
2177 if (off + 7 <= pCtx->tr.u32Limit)
2178 {
2179/** @todo check actual access pattern here. */
2180 uint64_t u64Tmp;
2181 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2182 if (rcStrict == VINF_SUCCESS)
2183 {
2184 *puEsp = u64Tmp & UINT32_MAX;
2185 *pSelSS = (RTSEL)(u64Tmp >> 32);
2186 return VINF_SUCCESS;
2187 }
2188 }
2189 else
2190 {
2191                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2192 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2193 }
2194 break;
2195 }
2196
2197 default:
2198 AssertFailed();
2199 rcStrict = VERR_IEM_IPE_4;
2200 break;
2201 }
2202
2203 *puEsp = 0; /* make gcc happy */
2204 *pSelSS = 0; /* make gcc happy */
2205 return rcStrict;
2206}
2207
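/* For reference: in a 16-bit TSS the ring-N stack fields are SP0:SS0 at bytes
 * 2..5, SP1:SS1 at 6..9 and SP2:SS2 at 10..13 (hence uCpl * 4 + 2 above); in a
 * 32-bit TSS they are ESP0:SS0 at bytes 4..11, ESP1:SS1 at 12..19 and
 * ESP2:SS2 at 20..27 (hence uCpl * 8 + 4), which is what the limit checks in
 * the function above verify before reading. */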
2208
2209/**
2210 * Loads the specified stack pointer from the 64-bit TSS.
2211 *
2212 * @returns VBox strict status code.
2213 * @param pIemCpu The IEM per CPU instance data.
2214 * @param pCtx The CPU context.
2215 * @param uCpl The CPL to load the stack for.
2216 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2217 * @param puRsp Where to return the new stack pointer.
2218 */
2219IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2220{
2221 Assert(uCpl < 4);
2222 Assert(uIst < 8);
2223 *puRsp = 0; /* make gcc happy */
2224
2225 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2226
2227 uint32_t off;
2228 if (uIst)
2229 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2230 else
2231 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2232 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2233 {
2234 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2235 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2236 }
2237
2238 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2239}
2240
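/* For reference: in the 64-bit TSS the RSP0..RSP2 fields start at byte 4 and
 * IST1..IST7 at byte 36, so uCpl=0 with uIst=0 reads the qword at TSS base + 4
 * (rsp0) and uIst=3 reads the qword at TSS base + 52 (ist3), matching the
 * offsets computed above. */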
2241
2242/**
2243 * Adjusts the CPU state according to the exception being raised.
2244 *
2245 * @param pCtx The CPU context.
2246 * @param u8Vector The exception that has been raised.
2247 */
2248DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2249{
2250 switch (u8Vector)
2251 {
2252 case X86_XCPT_DB:
2253 pCtx->dr[7] &= ~X86_DR7_GD;
2254 break;
2255 /** @todo Read the AMD and Intel exception reference... */
2256 }
2257}
2258
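/* Clearing DR7.GD when delivering #DB above mirrors the architectural
 * behaviour: with GD cleared, the debug exception handler can itself access
 * the debug registers without immediately triggering another #DB. */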
2259
2260/**
2261 * Implements exceptions and interrupts for real mode.
2262 *
2263 * @returns VBox strict status code.
2264 * @param pIemCpu The IEM per CPU instance data.
2265 * @param pCtx The CPU context.
2266 * @param cbInstr The number of bytes to offset rIP by in the return
2267 * address.
2268 * @param u8Vector The interrupt / exception vector number.
2269 * @param fFlags The flags.
2270 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2271 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2272 */
2273IEM_STATIC VBOXSTRICTRC
2274iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2275 PCPUMCTX pCtx,
2276 uint8_t cbInstr,
2277 uint8_t u8Vector,
2278 uint32_t fFlags,
2279 uint16_t uErr,
2280 uint64_t uCr2)
2281{
2282 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2283 NOREF(uErr); NOREF(uCr2);
2284
2285 /*
2286 * Read the IDT entry.
2287 */
2288 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2289 {
2290 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2291 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2292 }
2293 RTFAR16 Idte;
2294 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2295 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2296 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2297 return rcStrict;
2298
2299 /*
2300 * Push the stack frame.
2301 */
2302 uint16_t *pu16Frame;
2303 uint64_t uNewRsp;
2304 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2305 if (rcStrict != VINF_SUCCESS)
2306 return rcStrict;
2307
2308 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2309#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2310 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2311 if (pIemCpu->uTargetCpu <= IEMTARGETCPU_186)
2312 fEfl |= UINT16_C(0xf000);
2313#endif
2314 pu16Frame[2] = (uint16_t)fEfl;
2315 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2316 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2317 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2318 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2319 return rcStrict;
2320
2321 /*
2322 * Load the vector address into cs:ip and make exception specific state
2323 * adjustments.
2324 */
2325 pCtx->cs.Sel = Idte.sel;
2326 pCtx->cs.ValidSel = Idte.sel;
2327 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2328 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2329 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2330 pCtx->rip = Idte.off;
2331 fEfl &= ~X86_EFL_IF;
2332 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2333
2334 /** @todo do we actually do this in real mode? */
2335 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2336 iemRaiseXcptAdjustState(pCtx, u8Vector);
2337
2338 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2339}
2340
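/* For reference: in real mode the IDT is the classic interrupt vector table of
 * 4-byte offset:segment entries, so vector N is fetched from idtr.pIdt + 4 * N
 * above.  The frame pushed is 6 bytes - FLAGS, CS and IP (the return IP for
 * exceptions, the next IP for software interrupts) - after which IF is cleared
 * and execution resumes at Idte.sel:Idte.off. */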
2341
2342/**
2343 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2344 *
2345 * @param pIemCpu The IEM per CPU instance data.
2346 * @param pSReg Pointer to the segment register.
2347 */
2348IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2349{
2350 pSReg->Sel = 0;
2351 pSReg->ValidSel = 0;
2352 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2353 {
2354        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes: */
2355 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2356 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2357 }
2358 else
2359 {
2360 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2361 /** @todo check this on AMD-V */
2362 pSReg->u64Base = 0;
2363 pSReg->u32Limit = 0;
2364 }
2365}
2366
2367
2368/**
2369 * Loads a segment selector during a task switch in V8086 mode.
2370 *
2371 * @param pIemCpu The IEM per CPU instance data.
2372 * @param pSReg Pointer to the segment register.
2373 * @param uSel The selector value to load.
2374 */
2375IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2376{
2377 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2378 pSReg->Sel = uSel;
2379 pSReg->ValidSel = uSel;
2380 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2381 pSReg->u64Base = uSel << 4;
2382 pSReg->u32Limit = 0xffff;
2383 pSReg->Attr.u = 0xf3;
2384}
2385
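/* For reference: the attribute value 0xf3 used above decodes to a present,
 * DPL=3, non-system, read/write accessed data segment.  Together with
 * base = selector * 16 and the 64KB limit this is the fixed segment shape
 * assumed in virtual-8086 mode. */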
2386
2387/**
2388 * Loads a NULL data selector into a selector register, both the hidden and
2389 * visible parts, in protected mode.
2390 *
2391 * @param pIemCpu The IEM state of the calling EMT.
2392 * @param pSReg Pointer to the segment register.
2393 * @param uRpl The RPL.
2394 */
2395IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2396{
2397    /** @todo Testcase: write a testcase checking what happens when loading a NULL
2398 * data selector in protected mode. */
2399 pSReg->Sel = uRpl;
2400 pSReg->ValidSel = uRpl;
2401 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2402 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2403 {
2404 /* VT-x (Intel 3960x) observed doing something like this. */
2405 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2406 pSReg->u32Limit = UINT32_MAX;
2407 pSReg->u64Base = 0;
2408 }
2409 else
2410 {
2411 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2412 pSReg->u32Limit = 0;
2413 pSReg->u64Base = 0;
2414 }
2415}
2416
2417
2418/**
2419 * Loads a segment selector during a task switch in protected mode.
2420 *
2421 * In this task switch scenario, we would throw \#TS exceptions rather than
2422 * \#GPs.
2423 *
2424 * @returns VBox strict status code.
2425 * @param pIemCpu The IEM per CPU instance data.
2426 * @param pSReg Pointer to the segment register.
2427 * @param uSel The new selector value.
2428 *
2429 * @remarks This does _not_ handle CS or SS.
2430 * @remarks This expects pIemCpu->uCpl to be up to date.
2431 */
2432IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2433{
2434 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2435
2436 /* Null data selector. */
2437 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2438 {
2439 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2440 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2441 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2442 return VINF_SUCCESS;
2443 }
2444
2445 /* Fetch the descriptor. */
2446 IEMSELDESC Desc;
2447 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2448 if (rcStrict != VINF_SUCCESS)
2449 {
2450 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2451 VBOXSTRICTRC_VAL(rcStrict)));
2452 return rcStrict;
2453 }
2454
2455 /* Must be a data segment or readable code segment. */
2456 if ( !Desc.Legacy.Gen.u1DescType
2457 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2458 {
2459 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2460 Desc.Legacy.Gen.u4Type));
2461 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2462 }
2463
2464 /* Check privileges for data segments and non-conforming code segments. */
2465 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2466 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2467 {
2468 /* The RPL and the new CPL must be less than or equal to the DPL. */
2469 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2470 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2471 {
2472 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2473 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2474 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2475 }
2476 }
2477
2478 /* Is it there? */
2479 if (!Desc.Legacy.Gen.u1Present)
2480 {
2481 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2482 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2483 }
2484
2485 /* The base and limit. */
2486 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2487 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2488
2489 /*
2490 * Ok, everything checked out fine. Now set the accessed bit before
2491 * committing the result into the registers.
2492 */
2493 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2494 {
2495 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2496 if (rcStrict != VINF_SUCCESS)
2497 return rcStrict;
2498 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2499 }
2500
2501 /* Commit */
2502 pSReg->Sel = uSel;
2503 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2504 pSReg->u32Limit = cbLimit;
2505 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2506 pSReg->ValidSel = uSel;
2507 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2508 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2509 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2510
2511 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2512 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2513 return VINF_SUCCESS;
2514}
2515
2516
2517/**
2518 * Performs a task switch.
2519 *
2520 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2521 * caller is responsible for performing the necessary checks (like DPL, TSS
2522 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2523 * reference for JMP, CALL, IRET.
2524 *
2525 * If the task switch is due to a software interrupt or hardware exception,
2526 * the caller is responsible for validating the TSS selector and descriptor. See
2527 * Intel Instruction reference for INT n.
2528 *
2529 * @returns VBox strict status code.
2530 * @param pIemCpu The IEM per CPU instance data.
2531 * @param pCtx The CPU context.
2532 * @param enmTaskSwitch What caused this task switch.
2533 * @param uNextEip The EIP effective after the task switch.
2534 * @param fFlags The flags.
2535 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2536 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2537 * @param SelTSS The TSS selector of the new task.
2538 * @param pNewDescTSS Pointer to the new TSS descriptor.
2539 */
2540IEM_STATIC VBOXSTRICTRC
2541iemTaskSwitch(PIEMCPU pIemCpu,
2542 PCPUMCTX pCtx,
2543 IEMTASKSWITCH enmTaskSwitch,
2544 uint32_t uNextEip,
2545 uint32_t fFlags,
2546 uint16_t uErr,
2547 uint64_t uCr2,
2548 RTSEL SelTSS,
2549 PIEMSELDESC pNewDescTSS)
2550{
2551 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2552 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2553
2554 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2555 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2556 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2557 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2558 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2559
2560 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2561 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2562
2563 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2564 fIsNewTSS386, pCtx->eip, uNextEip));
2565
2566 /* Update CR2 in case it's a page-fault. */
2567 /** @todo This should probably be done much earlier in IEM/PGM. See
2568 * @bugref{5653#c49}. */
2569 if (fFlags & IEM_XCPT_FLAGS_CR2)
2570 pCtx->cr2 = uCr2;
2571
2572 /*
2573 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2574 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2575 */
2576 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2577 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2578 if (uNewTSSLimit < uNewTSSLimitMin)
2579 {
2580 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2581 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2582 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2583 }
2584
2585 /*
2586 * Check the current TSS limit. The last written byte to the current TSS during the
2587 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2588 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2589 *
2590 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2591 * end up with smaller than "legal" TSS limits.
2592 */
2593 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2594 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2595 if (uCurTSSLimit < uCurTSSLimitMin)
2596 {
2597 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2598 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2599 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2600 }
2601
2602 /*
2603 * Verify that the new TSS can be accessed and map it. Map only the required contents
2604 * and not the entire TSS.
2605 */
2606 void *pvNewTSS;
2607 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2608 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2609 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2610 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2611 * not perform correct translation if this happens. See Intel spec. 7.2.1
2612 * "Task-State Segment" */
2613 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2614 if (rcStrict != VINF_SUCCESS)
2615 {
2616 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2617 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2618 return rcStrict;
2619 }
2620
2621 /*
2622 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2623 */
2624 uint32_t u32EFlags = pCtx->eflags.u32;
2625 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2626 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2627 {
2628 PX86DESC pDescCurTSS;
2629 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2630 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2631 if (rcStrict != VINF_SUCCESS)
2632 {
2633            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2634 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2635 return rcStrict;
2636 }
2637
2638 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2639 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2640 if (rcStrict != VINF_SUCCESS)
2641 {
2642            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2643 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2644 return rcStrict;
2645 }
2646
2647 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2648 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2649 {
2650 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2651 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2652 u32EFlags &= ~X86_EFL_NT;
2653 }
2654 }
2655
2656 /*
2657 * Save the CPU state into the current TSS.
2658 */
2659 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2660 if (GCPtrNewTSS == GCPtrCurTSS)
2661 {
2662 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2663 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2664 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2665 }
2666 if (fIsNewTSS386)
2667 {
2668 /*
2669 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2670 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2671 */
2672 void *pvCurTSS32;
2673 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2674 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2675 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2676 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2677 if (rcStrict != VINF_SUCCESS)
2678 {
2679 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2680 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2681 return rcStrict;
2682 }
2683
2684        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2685 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2686 pCurTSS32->eip = uNextEip;
2687 pCurTSS32->eflags = u32EFlags;
2688 pCurTSS32->eax = pCtx->eax;
2689 pCurTSS32->ecx = pCtx->ecx;
2690 pCurTSS32->edx = pCtx->edx;
2691 pCurTSS32->ebx = pCtx->ebx;
2692 pCurTSS32->esp = pCtx->esp;
2693 pCurTSS32->ebp = pCtx->ebp;
2694 pCurTSS32->esi = pCtx->esi;
2695 pCurTSS32->edi = pCtx->edi;
2696 pCurTSS32->es = pCtx->es.Sel;
2697 pCurTSS32->cs = pCtx->cs.Sel;
2698 pCurTSS32->ss = pCtx->ss.Sel;
2699 pCurTSS32->ds = pCtx->ds.Sel;
2700 pCurTSS32->fs = pCtx->fs.Sel;
2701 pCurTSS32->gs = pCtx->gs.Sel;
2702
2703 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2704 if (rcStrict != VINF_SUCCESS)
2705 {
2706 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2707 VBOXSTRICTRC_VAL(rcStrict)));
2708 return rcStrict;
2709 }
2710 }
2711 else
2712 {
2713 /*
2714 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2715 */
2716 void *pvCurTSS16;
2717 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2718 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2719 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2720 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2721 if (rcStrict != VINF_SUCCESS)
2722 {
2723 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2724 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2725 return rcStrict;
2726 }
2727
2728        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2729 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2730 pCurTSS16->ip = uNextEip;
2731 pCurTSS16->flags = u32EFlags;
2732 pCurTSS16->ax = pCtx->ax;
2733 pCurTSS16->cx = pCtx->cx;
2734 pCurTSS16->dx = pCtx->dx;
2735 pCurTSS16->bx = pCtx->bx;
2736 pCurTSS16->sp = pCtx->sp;
2737 pCurTSS16->bp = pCtx->bp;
2738 pCurTSS16->si = pCtx->si;
2739 pCurTSS16->di = pCtx->di;
2740 pCurTSS16->es = pCtx->es.Sel;
2741 pCurTSS16->cs = pCtx->cs.Sel;
2742 pCurTSS16->ss = pCtx->ss.Sel;
2743 pCurTSS16->ds = pCtx->ds.Sel;
2744
2745 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2746 if (rcStrict != VINF_SUCCESS)
2747 {
2748 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2749 VBOXSTRICTRC_VAL(rcStrict)));
2750 return rcStrict;
2751 }
2752 }
2753
2754 /*
2755 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2756 */
2757 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2758 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2759 {
2760 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2761 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2762 pNewTSS->selPrev = pCtx->tr.Sel;
2763 }
2764
2765 /*
2766 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2767 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2768 */
2769 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2770 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2771 bool fNewDebugTrap;
2772 if (fIsNewTSS386)
2773 {
2774 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2775 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2776 uNewEip = pNewTSS32->eip;
2777 uNewEflags = pNewTSS32->eflags;
2778 uNewEax = pNewTSS32->eax;
2779 uNewEcx = pNewTSS32->ecx;
2780 uNewEdx = pNewTSS32->edx;
2781 uNewEbx = pNewTSS32->ebx;
2782 uNewEsp = pNewTSS32->esp;
2783 uNewEbp = pNewTSS32->ebp;
2784 uNewEsi = pNewTSS32->esi;
2785 uNewEdi = pNewTSS32->edi;
2786 uNewES = pNewTSS32->es;
2787 uNewCS = pNewTSS32->cs;
2788 uNewSS = pNewTSS32->ss;
2789 uNewDS = pNewTSS32->ds;
2790 uNewFS = pNewTSS32->fs;
2791 uNewGS = pNewTSS32->gs;
2792 uNewLdt = pNewTSS32->selLdt;
2793 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2794 }
2795 else
2796 {
2797 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2798 uNewCr3 = 0;
2799 uNewEip = pNewTSS16->ip;
2800 uNewEflags = pNewTSS16->flags;
2801 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2802 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2803 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2804 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2805 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2806 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2807 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2808 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2809 uNewES = pNewTSS16->es;
2810 uNewCS = pNewTSS16->cs;
2811 uNewSS = pNewTSS16->ss;
2812 uNewDS = pNewTSS16->ds;
2813 uNewFS = 0;
2814 uNewGS = 0;
2815 uNewLdt = pNewTSS16->selLdt;
2816 fNewDebugTrap = false;
2817 }
2818
2819 if (GCPtrNewTSS == GCPtrCurTSS)
2820 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2821 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2822
2823 /*
2824 * We're done accessing the new TSS.
2825 */
2826 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2827 if (rcStrict != VINF_SUCCESS)
2828 {
2829 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2830 return rcStrict;
2831 }
2832
2833 /*
2834 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2835 */
2836 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2837 {
2838 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2839 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2840 if (rcStrict != VINF_SUCCESS)
2841 {
2842 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2843 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2844 return rcStrict;
2845 }
2846
2847 /* Check that the descriptor indicates the new TSS is available (not busy). */
2848 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2849 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2850 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2851
2852 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2853 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2854 if (rcStrict != VINF_SUCCESS)
2855 {
2856 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2857 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2858 return rcStrict;
2859 }
2860 }
2861
2862 /*
2863     * From this point on, we're technically in the new task.  Any exception raised from here
2864     * on is taken after the task switch completes, but before any instruction executes in the new task.
2865 */
2866 pCtx->tr.Sel = SelTSS;
2867 pCtx->tr.ValidSel = SelTSS;
2868 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2869 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2870 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2871 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2872 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2873
2874 /* Set the busy bit in TR. */
2875 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2876 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2877 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2878 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2879 {
2880 uNewEflags |= X86_EFL_NT;
2881 }
2882
2883 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2884 pCtx->cr0 |= X86_CR0_TS;
2885 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2886
2887 pCtx->eip = uNewEip;
2888 pCtx->eax = uNewEax;
2889 pCtx->ecx = uNewEcx;
2890 pCtx->edx = uNewEdx;
2891 pCtx->ebx = uNewEbx;
2892 pCtx->esp = uNewEsp;
2893 pCtx->ebp = uNewEbp;
2894 pCtx->esi = uNewEsi;
2895 pCtx->edi = uNewEdi;
2896
2897 uNewEflags &= X86_EFL_LIVE_MASK;
2898 uNewEflags |= X86_EFL_RA1_MASK;
2899 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2900
2901 /*
2902 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2903 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2904 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2905 */
2906 pCtx->es.Sel = uNewES;
2907 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2908 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2909
2910 pCtx->cs.Sel = uNewCS;
2911 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2912 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2913
2914 pCtx->ss.Sel = uNewSS;
2915 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2916 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2917
2918 pCtx->ds.Sel = uNewDS;
2919 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2920 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2921
2922 pCtx->fs.Sel = uNewFS;
2923 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2924 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2925
2926 pCtx->gs.Sel = uNewGS;
2927 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2928 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2929 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2930
2931 pCtx->ldtr.Sel = uNewLdt;
2932 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2933 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2934 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2935
2936 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2937 {
2938 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2939 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2940 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2941 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2942 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2943 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2944 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2945 }
2946
2947 /*
2948 * Switch CR3 for the new task.
2949 */
2950 if ( fIsNewTSS386
2951 && (pCtx->cr0 & X86_CR0_PG))
2952 {
2953 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2954 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2955 {
2956 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2957 AssertRCSuccessReturn(rc, rc);
2958 }
2959 else
2960 pCtx->cr3 = uNewCr3;
2961
2962 /* Inform PGM. */
2963 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2964 {
2965 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2966 AssertRCReturn(rc, rc);
2967 /* ignore informational status codes */
2968 }
2969 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2970 }
2971
2972 /*
2973 * Switch LDTR for the new task.
2974 */
2975 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2976 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2977 else
2978 {
2979 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2980
2981 IEMSELDESC DescNewLdt;
2982 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2983 if (rcStrict != VINF_SUCCESS)
2984 {
2985 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2986 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2987 return rcStrict;
2988 }
2989 if ( !DescNewLdt.Legacy.Gen.u1Present
2990 || DescNewLdt.Legacy.Gen.u1DescType
2991 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2992 {
2993 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2994 uNewLdt, DescNewLdt.Legacy.u));
2995 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2996 }
2997
2998 pCtx->ldtr.ValidSel = uNewLdt;
2999 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3000 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3001 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3002 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3003 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3004 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3005 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
3006 }
3007
3008 IEMSELDESC DescSS;
3009 if (IEM_IS_V86_MODE(pIemCpu))
3010 {
3011 pIemCpu->uCpl = 3;
3012 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
3013 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
3014 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
3015 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
3016 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
3017 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
3018 }
3019 else
3020 {
3021 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3022
3023 /*
3024 * Load the stack segment for the new task.
3025 */
3026 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3027 {
3028 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3029 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3030 }
3031
3032 /* Fetch the descriptor. */
3033 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
3034 if (rcStrict != VINF_SUCCESS)
3035 {
3036 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3037 VBOXSTRICTRC_VAL(rcStrict)));
3038 return rcStrict;
3039 }
3040
3041 /* SS must be a data segment and writable. */
3042 if ( !DescSS.Legacy.Gen.u1DescType
3043 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3044 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3045 {
3046 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3047 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3048 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3049 }
3050
3051 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3052 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3053 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3054 {
3055 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3056 uNewCpl));
3057 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3058 }
3059
3060 /* Is it there? */
3061 if (!DescSS.Legacy.Gen.u1Present)
3062 {
3063 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3064 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3065 }
3066
3067 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3068 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3069
3070 /* Set the accessed bit before committing the result into SS. */
3071 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3072 {
3073 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3074 if (rcStrict != VINF_SUCCESS)
3075 return rcStrict;
3076 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3077 }
3078
3079 /* Commit SS. */
3080 pCtx->ss.Sel = uNewSS;
3081 pCtx->ss.ValidSel = uNewSS;
3082 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3083 pCtx->ss.u32Limit = cbLimit;
3084 pCtx->ss.u64Base = u64Base;
3085 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3086 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3087
3088 /* CPL has changed, update IEM before loading rest of segments. */
3089 pIemCpu->uCpl = uNewCpl;
3090
3091 /*
3092 * Load the data segments for the new task.
3093 */
3094 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3095 if (rcStrict != VINF_SUCCESS)
3096 return rcStrict;
3097 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3098 if (rcStrict != VINF_SUCCESS)
3099 return rcStrict;
3100 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3101 if (rcStrict != VINF_SUCCESS)
3102 return rcStrict;
3103 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3104 if (rcStrict != VINF_SUCCESS)
3105 return rcStrict;
3106
3107 /*
3108 * Load the code segment for the new task.
3109 */
3110 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3111 {
3112 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3113 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3114 }
3115
3116 /* Fetch the descriptor. */
3117 IEMSELDESC DescCS;
3118 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3119 if (rcStrict != VINF_SUCCESS)
3120 {
3121 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3122 return rcStrict;
3123 }
3124
3125 /* CS must be a code segment. */
3126 if ( !DescCS.Legacy.Gen.u1DescType
3127 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3128 {
3129 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3130 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3131 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3132 }
3133
3134 /* For conforming CS, DPL must be less than or equal to the RPL. */
3135 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3136 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3137 {
3138            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3139 DescCS.Legacy.Gen.u2Dpl));
3140 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3141 }
3142
3143 /* For non-conforming CS, DPL must match RPL. */
3144 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3145 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3146 {
3147            Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3148 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3149 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3150 }
3151
3152 /* Is it there? */
3153 if (!DescCS.Legacy.Gen.u1Present)
3154 {
3155 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3156 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3157 }
3158
3159 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3160 u64Base = X86DESC_BASE(&DescCS.Legacy);
3161
3162 /* Set the accessed bit before committing the result into CS. */
3163 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3164 {
3165 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3166 if (rcStrict != VINF_SUCCESS)
3167 return rcStrict;
3168 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3169 }
3170
3171 /* Commit CS. */
3172 pCtx->cs.Sel = uNewCS;
3173 pCtx->cs.ValidSel = uNewCS;
3174 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3175 pCtx->cs.u32Limit = cbLimit;
3176 pCtx->cs.u64Base = u64Base;
3177 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3178 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3179 }
3180
3181 /** @todo Debug trap. */
3182 if (fIsNewTSS386 && fNewDebugTrap)
3183 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3184
3185 /*
3186 * Construct the error code masks based on what caused this task switch.
3187 * See Intel Instruction reference for INT.
3188 */
3189 uint16_t uExt;
3190 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3191 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3192 {
3193 uExt = 1;
3194 }
3195 else
3196 uExt = 0;
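    /* Note on the error code raised below: bits 15:3 hold a selector index, bit 2
       (TI) selects LDT vs. GDT, bit 1 (IDT) marks an IDT reference, and bit 0 (EXT)
       is what uExt supplies - it tells the guest handler that the event came from
       an external source (e.g. a hardware interrupt) rather than a software INT n. */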
3197
3198 /*
3199 * Push any error code on to the new stack.
3200 */
3201 if (fFlags & IEM_XCPT_FLAGS_ERR)
3202 {
3203 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3204 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3205 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3206
3207 /* Check that there is sufficient space on the stack. */
3208 /** @todo Factor out segment limit checking for normal/expand down segments
3209 * into a separate function. */
3210 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3211 {
3212 if ( pCtx->esp - 1 > cbLimitSS
3213 || pCtx->esp < cbStackFrame)
3214 {
3215 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3216 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3217 cbStackFrame));
3218 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3219 }
3220 }
3221 else
3222 {
3223            if (   pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3224 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3225 {
3226 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3227 cbStackFrame));
3228 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3229 }
3230 }
3231
3232
3233 if (fIsNewTSS386)
3234 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3235 else
3236 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3237 if (rcStrict != VINF_SUCCESS)
3238 {
3239 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3240 VBOXSTRICTRC_VAL(rcStrict)));
3241 return rcStrict;
3242 }
3243 }
3244
3245 /* Check the new EIP against the new CS limit. */
3246 if (pCtx->eip > pCtx->cs.u32Limit)
3247 {
3248        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3249 pCtx->eip, pCtx->cs.u32Limit));
3250 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3251 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3252 }
3253
3254 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3255 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3256}
3257
3258
3259/**
3260 * Implements exceptions and interrupts for protected mode.
3261 *
3262 * @returns VBox strict status code.
3263 * @param pIemCpu The IEM per CPU instance data.
3264 * @param pCtx The CPU context.
3265 * @param cbInstr The number of bytes to offset rIP by in the return
3266 * address.
3267 * @param u8Vector The interrupt / exception vector number.
3268 * @param fFlags The flags.
3269 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3270 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3271 */
3272IEM_STATIC VBOXSTRICTRC
3273iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3274 PCPUMCTX pCtx,
3275 uint8_t cbInstr,
3276 uint8_t u8Vector,
3277 uint32_t fFlags,
3278 uint16_t uErr,
3279 uint64_t uCr2)
3280{
3281 /*
3282 * Read the IDT entry.
3283 */
3284 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3285 {
3286 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3287 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3288 }
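    /* Protected-mode IDT entries are 8 bytes, so the limit check above ensures that
       all of bytes vector*8 .. vector*8+7 lie inside the IDT before the gate is
       fetched. */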
3289 X86DESC Idte;
3290 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3291 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3292 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3293 return rcStrict;
3294 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3295 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3296 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3297
3298 /*
3299 * Check the descriptor type, DPL and such.
3300 * ASSUMES this is done in the same order as described for call-gate calls.
3301 */
3302 if (Idte.Gate.u1DescType)
3303 {
3304 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3305 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3306 }
3307 bool fTaskGate = false;
3308 uint8_t f32BitGate = true;
3309 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
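    /* TF, NT, RF and VM are always stripped from the flags the handler starts with;
       the switch below adds IF to this mask for interrupt gates (but not trap
       gates), so interrupt-gate handlers begin with interrupts disabled. */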
3310 switch (Idte.Gate.u4Type)
3311 {
3312 case X86_SEL_TYPE_SYS_UNDEFINED:
3313 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3314 case X86_SEL_TYPE_SYS_LDT:
3315 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3316 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3317 case X86_SEL_TYPE_SYS_UNDEFINED2:
3318 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3319 case X86_SEL_TYPE_SYS_UNDEFINED3:
3320 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3321 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3322 case X86_SEL_TYPE_SYS_UNDEFINED4:
3323 {
3324 /** @todo check what actually happens when the type is wrong...
3325 * esp. call gates. */
3326 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3327 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3328 }
3329
3330 case X86_SEL_TYPE_SYS_286_INT_GATE:
3331            f32BitGate = false; /* deliberate fallthru into the 386 interrupt gate case */
3332 case X86_SEL_TYPE_SYS_386_INT_GATE:
3333 fEflToClear |= X86_EFL_IF;
3334 break;
3335
3336 case X86_SEL_TYPE_SYS_TASK_GATE:
3337 fTaskGate = true;
3338#ifndef IEM_IMPLEMENTS_TASKSWITCH
3339 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3340#endif
3341 break;
3342
3343 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3344            f32BitGate = false; /* deliberate fallthru into the 386 trap gate case */
3345 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3346 break;
3347
3348 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3349 }
3350
3351 /* Check DPL against CPL if applicable. */
3352 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3353 {
3354 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3355 {
3356 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3357 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3358 }
3359 }
3360
3361 /* Is it there? */
3362 if (!Idte.Gate.u1Present)
3363 {
3364 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3365 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3366 }
3367
3368 /* Is it a task-gate? */
3369 if (fTaskGate)
3370 {
3371 /*
3372 * Construct the error code masks based on what caused this task switch.
3373 * See Intel Instruction reference for INT.
3374 */
3375 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3376 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3377 RTSEL SelTSS = Idte.Gate.u16Sel;
3378
3379 /*
3380 * Fetch the TSS descriptor in the GDT.
3381 */
3382 IEMSELDESC DescTSS;
3383 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3384 if (rcStrict != VINF_SUCCESS)
3385 {
3386 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3387 VBOXSTRICTRC_VAL(rcStrict)));
3388 return rcStrict;
3389 }
3390
3391 /* The TSS descriptor must be a system segment and be available (not busy). */
3392 if ( DescTSS.Legacy.Gen.u1DescType
3393 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3394 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3395 {
3396 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3397 u8Vector, SelTSS, DescTSS.Legacy.au64));
3398 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3399 }
3400
3401 /* The TSS must be present. */
3402 if (!DescTSS.Legacy.Gen.u1Present)
3403 {
3404 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3405 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3406 }
3407
3408 /* Do the actual task switch. */
3409 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3410 }
3411
3412 /* A null CS is bad. */
3413 RTSEL NewCS = Idte.Gate.u16Sel;
3414 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3415 {
3416 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3417 return iemRaiseGeneralProtectionFault0(pIemCpu);
3418 }
3419
3420 /* Fetch the descriptor for the new CS. */
3421 IEMSELDESC DescCS;
3422 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3423 if (rcStrict != VINF_SUCCESS)
3424 {
3425 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3426 return rcStrict;
3427 }
3428
3429 /* Must be a code segment. */
3430 if (!DescCS.Legacy.Gen.u1DescType)
3431 {
3432 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3433 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3434 }
3435 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3436 {
3437 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3438 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3439 }
3440
3441 /* Don't allow lowering the privilege level. */
3442 /** @todo Does the lowering of privileges apply to software interrupts
3443 * only? This has bearings on the more-privileged or
3444 * same-privilege stack behavior further down. A testcase would
3445 * be nice. */
3446 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3447 {
3448 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3449 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3450 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3451 }
3452
3453 /* Make sure the selector is present. */
3454 if (!DescCS.Legacy.Gen.u1Present)
3455 {
3456 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3457 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3458 }
3459
3460 /* Check the new EIP against the new CS limit. */
3461 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3462 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3463 ? Idte.Gate.u16OffsetLow
3464 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3465 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3466 if (uNewEip > cbLimitCS)
3467 {
3468 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3469 u8Vector, uNewEip, cbLimitCS, NewCS));
3470 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3471 }
3472
3473 /* Calc the flag image to push. */
3474 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3475 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3476 fEfl &= ~X86_EFL_RF;
3477 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3478 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3479
3480    /* When coming from V8086 mode, only a transition to CPL 0 is valid. */
3481 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3482 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
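    /* For a conforming code segment the handler keeps running at the current CPL;
       for a non-conforming one it runs at CS.DPL, which the check above guarantees
       is <= CPL. */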
3483 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3484 {
3485 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3486 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3487 }
3488
3489 /*
3490 * If the privilege level changes, we need to get a new stack from the TSS.
3491 * This in turns means validating the new SS and ESP...
3492 */
3493 if (uNewCpl != pIemCpu->uCpl)
3494 {
3495 RTSEL NewSS;
3496 uint32_t uNewEsp;
3497 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3498 if (rcStrict != VINF_SUCCESS)
3499 return rcStrict;
3500
3501 IEMSELDESC DescSS;
3502 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3503 if (rcStrict != VINF_SUCCESS)
3504 return rcStrict;
3505
3506 /* Check that there is sufficient space for the stack frame. */
3507 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3508 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3509 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3510 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
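        /* The sizes above follow from the slot counts: a ring change pushes SS, ESP,
           EFLAGS, CS and EIP (5 slots, 6 with an error code); leaving V8086 mode
           additionally pushes GS, FS, DS and ES (9 resp. 10 slots).  Slots are
           2 bytes through a 16-bit gate and 4 bytes through a 32-bit one, hence the
           shift by f32BitGate. */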
3511
3512 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3513 {
3514 if ( uNewEsp - 1 > cbLimitSS
3515 || uNewEsp < cbStackFrame)
3516 {
3517 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3518 u8Vector, NewSS, uNewEsp, cbStackFrame));
3519 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3520 }
3521 }
3522 else
3523 {
3524            if (   uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3525 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3526 {
3527 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3528 u8Vector, NewSS, uNewEsp, cbStackFrame));
3529 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3530 }
3531 }
3532
3533 /*
3534 * Start making changes.
3535 */
3536
3537 /* Create the stack frame. */
3538 RTPTRUNION uStackFrame;
3539 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3540 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3541 if (rcStrict != VINF_SUCCESS)
3542 return rcStrict;
3543 void * const pvStackFrame = uStackFrame.pv;
3544 if (f32BitGate)
3545 {
3546 if (fFlags & IEM_XCPT_FLAGS_ERR)
3547 *uStackFrame.pu32++ = uErr;
3548 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3549 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3550 uStackFrame.pu32[2] = fEfl;
3551 uStackFrame.pu32[3] = pCtx->esp;
3552 uStackFrame.pu32[4] = pCtx->ss.Sel;
3553 if (fEfl & X86_EFL_VM)
3554 {
3555 uStackFrame.pu32[1] = pCtx->cs.Sel;
3556 uStackFrame.pu32[5] = pCtx->es.Sel;
3557 uStackFrame.pu32[6] = pCtx->ds.Sel;
3558 uStackFrame.pu32[7] = pCtx->fs.Sel;
3559 uStackFrame.pu32[8] = pCtx->gs.Sel;
3560 }
3561 }
3562 else
3563 {
3564 if (fFlags & IEM_XCPT_FLAGS_ERR)
3565 *uStackFrame.pu16++ = uErr;
3566 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3567 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3568 uStackFrame.pu16[2] = fEfl;
3569 uStackFrame.pu16[3] = pCtx->sp;
3570 uStackFrame.pu16[4] = pCtx->ss.Sel;
3571 if (fEfl & X86_EFL_VM)
3572 {
3573 uStackFrame.pu16[1] = pCtx->cs.Sel;
3574 uStackFrame.pu16[5] = pCtx->es.Sel;
3575 uStackFrame.pu16[6] = pCtx->ds.Sel;
3576 uStackFrame.pu16[7] = pCtx->fs.Sel;
3577 uStackFrame.pu16[8] = pCtx->gs.Sel;
3578 }
3579 }
3580 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3581 if (rcStrict != VINF_SUCCESS)
3582 return rcStrict;
3583
3584 /* Mark the selectors 'accessed' (hope this is the correct time). */
3585        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3586 * after pushing the stack frame? (Write protect the gdt + stack to
3587 * find out.) */
3588 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3589 {
3590 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3591 if (rcStrict != VINF_SUCCESS)
3592 return rcStrict;
3593 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3594 }
3595
3596 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3597 {
3598 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3599 if (rcStrict != VINF_SUCCESS)
3600 return rcStrict;
3601 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3602 }
3603
3604 /*
3605         * Start committing the register changes (joins with the DPL=CPL branch).
3606 */
3607 pCtx->ss.Sel = NewSS;
3608 pCtx->ss.ValidSel = NewSS;
3609 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3610 pCtx->ss.u32Limit = cbLimitSS;
3611 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3612 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3613 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3614 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3615 * SP is loaded).
3616 * Need to check the other combinations too:
3617 * - 16-bit TSS, 32-bit handler
3618 * - 32-bit TSS, 16-bit handler */
3619 if (!pCtx->ss.Attr.n.u1DefBig)
3620 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
3621 else
3622 pCtx->rsp = uNewEsp - cbStackFrame;
3623 pIemCpu->uCpl = uNewCpl;
3624
3625 if (fEfl & X86_EFL_VM)
3626 {
3627 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3628 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3629 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3630 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3631 }
3632 }
3633 /*
3634 * Same privilege, no stack change and smaller stack frame.
3635 */
3636 else
3637 {
3638 uint64_t uNewRsp;
3639 RTPTRUNION uStackFrame;
3640 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
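        /* With no privilege change only EFLAGS, CS and EIP are pushed (3 slots, 4
           with an error code): 6 or 8 bytes through a 16-bit gate, doubled for a
           32-bit gate. */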
3641 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3642 if (rcStrict != VINF_SUCCESS)
3643 return rcStrict;
3644 void * const pvStackFrame = uStackFrame.pv;
3645
3646 if (f32BitGate)
3647 {
3648 if (fFlags & IEM_XCPT_FLAGS_ERR)
3649 *uStackFrame.pu32++ = uErr;
3650 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3651 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3652 uStackFrame.pu32[2] = fEfl;
3653 }
3654 else
3655 {
3656 if (fFlags & IEM_XCPT_FLAGS_ERR)
3657 *uStackFrame.pu16++ = uErr;
3658 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3659 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3660 uStackFrame.pu16[2] = fEfl;
3661 }
3662 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3663 if (rcStrict != VINF_SUCCESS)
3664 return rcStrict;
3665
3666 /* Mark the CS selector as 'accessed'. */
3667 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3668 {
3669 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3670 if (rcStrict != VINF_SUCCESS)
3671 return rcStrict;
3672 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3673 }
3674
3675 /*
3676 * Start committing the register changes (joins with the other branch).
3677 */
3678 pCtx->rsp = uNewRsp;
3679 }
3680
3681 /* ... register committing continues. */
3682 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3683 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3684 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3685 pCtx->cs.u32Limit = cbLimitCS;
3686 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3687 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3688
3689 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3690 fEfl &= ~fEflToClear;
3691 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3692
3693 if (fFlags & IEM_XCPT_FLAGS_CR2)
3694 pCtx->cr2 = uCr2;
3695
3696 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3697 iemRaiseXcptAdjustState(pCtx, u8Vector);
3698
3699 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3700}
3701
3702
3703/**
3704 * Implements exceptions and interrupts for long mode.
3705 *
3706 * @returns VBox strict status code.
3707 * @param pIemCpu The IEM per CPU instance data.
3708 * @param pCtx The CPU context.
3709 * @param cbInstr The number of bytes to offset rIP by in the return
3710 * address.
3711 * @param u8Vector The interrupt / exception vector number.
3712 * @param fFlags The flags.
3713 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3714 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3715 */
3716IEM_STATIC VBOXSTRICTRC
3717iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3718 PCPUMCTX pCtx,
3719 uint8_t cbInstr,
3720 uint8_t u8Vector,
3721 uint32_t fFlags,
3722 uint16_t uErr,
3723 uint64_t uCr2)
3724{
3725 /*
3726 * Read the IDT entry.
3727 */
3728 uint16_t offIdt = (uint16_t)u8Vector << 4;
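    /* Long-mode IDT gates are 16 bytes, hence the vector << 4 offset; the descriptor
       is fetched below as two 8-byte system reads. */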
3729 if (pCtx->idtr.cbIdt < offIdt + 7)
3730 {
3731 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3732 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3733 }
3734 X86DESC64 Idte;
3735 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3736 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3737 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3738 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3739 return rcStrict;
3740 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3741 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3742 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3743
3744 /*
3745 * Check the descriptor type, DPL and such.
3746 * ASSUMES this is done in the same order as described for call-gate calls.
3747 */
3748 if (Idte.Gate.u1DescType)
3749 {
3750 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3751 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3752 }
3753 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3754 switch (Idte.Gate.u4Type)
3755 {
3756 case AMD64_SEL_TYPE_SYS_INT_GATE:
3757 fEflToClear |= X86_EFL_IF;
3758 break;
3759 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3760 break;
3761
3762 default:
3763 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3764 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3765 }
3766
3767 /* Check DPL against CPL if applicable. */
3768 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3769 {
3770 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3771 {
3772 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3773 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3774 }
3775 }
3776
3777 /* Is it there? */
3778 if (!Idte.Gate.u1Present)
3779 {
3780 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3781 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3782 }
3783
3784 /* A null CS is bad. */
3785 RTSEL NewCS = Idte.Gate.u16Sel;
3786 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3787 {
3788 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3789 return iemRaiseGeneralProtectionFault0(pIemCpu);
3790 }
3791
3792 /* Fetch the descriptor for the new CS. */
3793 IEMSELDESC DescCS;
3794 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3795 if (rcStrict != VINF_SUCCESS)
3796 {
3797 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3798 return rcStrict;
3799 }
3800
3801 /* Must be a 64-bit code segment. */
3802 if (!DescCS.Long.Gen.u1DescType)
3803 {
3804 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3805 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3806 }
3807 if ( !DescCS.Long.Gen.u1Long
3808 || DescCS.Long.Gen.u1DefBig
3809 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3810 {
3811 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3812 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3813 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3814 }
3815
3816 /* Don't allow lowering the privilege level. For non-conforming CS
3817 selectors, the CS.DPL sets the privilege level the trap/interrupt
3818 handler runs at. For conforming CS selectors, the CPL remains
3819 unchanged, but the CS.DPL must be <= CPL. */
3820 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3821 * when CPU in Ring-0. Result \#GP? */
3822 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3823 {
3824 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3825 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3826 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3827 }
3828
3829
3830 /* Make sure the selector is present. */
3831 if (!DescCS.Legacy.Gen.u1Present)
3832 {
3833 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3834 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3835 }
3836
3837 /* Check that the new RIP is canonical. */
3838 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3839 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3840 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3841 if (!IEM_IS_CANONICAL(uNewRip))
3842 {
3843 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3844 return iemRaiseGeneralProtectionFault0(pIemCpu);
3845 }
3846
3847 /*
3848 * If the privilege level changes or if the IST isn't zero, we need to get
3849 * a new stack from the TSS.
3850 */
3851 uint64_t uNewRsp;
3852 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3853 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3854 if ( uNewCpl != pIemCpu->uCpl
3855 || Idte.Gate.u3IST != 0)
3856 {
3857 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3858 if (rcStrict != VINF_SUCCESS)
3859 return rcStrict;
3860 }
3861 else
3862 uNewRsp = pCtx->rsp;
3863 uNewRsp &= ~(uint64_t)0xf;
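    /* In long mode the new RSP is aligned down to a 16-byte boundary before the
       frame is pushed, whether it came from the TSS (CPL change / IST) or is the
       current stack. */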
3864
3865 /*
3866 * Calc the flag image to push.
3867 */
3868 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3869 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3870 fEfl &= ~X86_EFL_RF;
3871 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3872 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3873
3874 /*
3875 * Start making changes.
3876 */
3877
3878 /* Create the stack frame. */
3879 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
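    /* Long mode always pushes SS:RSP together with RFLAGS, CS and RIP - five 8-byte
       slots, plus one more when an error code is supplied - even when there is no
       privilege change. */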
3880 RTPTRUNION uStackFrame;
3881 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3882 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3883 if (rcStrict != VINF_SUCCESS)
3884 return rcStrict;
3885 void * const pvStackFrame = uStackFrame.pv;
3886
3887 if (fFlags & IEM_XCPT_FLAGS_ERR)
3888 *uStackFrame.pu64++ = uErr;
3889 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3890 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3891 uStackFrame.pu64[2] = fEfl;
3892 uStackFrame.pu64[3] = pCtx->rsp;
3893 uStackFrame.pu64[4] = pCtx->ss.Sel;
3894 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3895 if (rcStrict != VINF_SUCCESS)
3896 return rcStrict;
3897
3898    /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3899    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3900 * after pushing the stack frame? (Write protect the gdt + stack to
3901 * find out.) */
3902 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3903 {
3904 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3905 if (rcStrict != VINF_SUCCESS)
3906 return rcStrict;
3907 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3908 }
3909
3910 /*
3911     * Start committing the register changes.
3912 */
3913 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3914 * hidden registers when interrupting 32-bit or 16-bit code! */
3915 if (uNewCpl != pIemCpu->uCpl)
3916 {
3917 pCtx->ss.Sel = 0 | uNewCpl;
3918 pCtx->ss.ValidSel = 0 | uNewCpl;
3919 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3920 pCtx->ss.u32Limit = UINT32_MAX;
3921 pCtx->ss.u64Base = 0;
3922 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3923 }
3924 pCtx->rsp = uNewRsp - cbStackFrame;
3925 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3926 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3927 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3928 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3929 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3930 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3931 pCtx->rip = uNewRip;
3932 pIemCpu->uCpl = uNewCpl;
3933
3934 fEfl &= ~fEflToClear;
3935 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3936
3937 if (fFlags & IEM_XCPT_FLAGS_CR2)
3938 pCtx->cr2 = uCr2;
3939
3940 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3941 iemRaiseXcptAdjustState(pCtx, u8Vector);
3942
3943 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3944}
3945
3946
3947/**
3948 * Implements exceptions and interrupts.
3949 *
3950 * All exceptions and interrupts go through this function!
3951 *
3952 * @returns VBox strict status code.
3953 * @param pIemCpu The IEM per CPU instance data.
3954 * @param cbInstr The number of bytes to offset rIP by in the return
3955 * address.
3956 * @param u8Vector The interrupt / exception vector number.
3957 * @param fFlags The flags.
3958 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3959 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3960 */
3961DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3962iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3963 uint8_t cbInstr,
3964 uint8_t u8Vector,
3965 uint32_t fFlags,
3966 uint16_t uErr,
3967 uint64_t uCr2)
3968{
3969 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3970#ifdef IN_RING0
3971 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3972 AssertRCReturn(rc, rc);
3973#endif
3974
3975 /*
3976 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3977 */
3978 if ( pCtx->eflags.Bits.u1VM
3979 && pCtx->eflags.Bits.u2IOPL != 3
3980 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3981 && (pCtx->cr0 & X86_CR0_PE) )
3982 {
3983 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3984 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3985 u8Vector = X86_XCPT_GP;
3986 uErr = 0;
3987 }
3988#ifdef DBGFTRACE_ENABLED
3989 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3990 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3991 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3992#endif
3993
3994 /*
3995 * Do recursion accounting.
3996 */
3997 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3998 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3999 if (pIemCpu->cXcptRecursions == 0)
4000 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4001 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
4002 else
4003 {
4004 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4005 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
4006
4007        /** @todo double and triple faults. */
4008 if (pIemCpu->cXcptRecursions >= 3)
4009 {
4010#ifdef DEBUG_bird
4011 AssertFailed();
4012#endif
4013 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4014 }
4015
4016 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4017 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4018 {
4019 ....
4020 } */
4021 }
4022 pIemCpu->cXcptRecursions++;
4023 pIemCpu->uCurXcpt = u8Vector;
4024 pIemCpu->fCurXcpt = fFlags;
4025
4026 /*
4027 * Extensive logging.
4028 */
4029#if defined(LOG_ENABLED) && defined(IN_RING3)
4030 if (LogIs3Enabled())
4031 {
4032 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4033 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4034 char szRegs[4096];
4035 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4036 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4037 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4038 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4039 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4040 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4041 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4042 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4043 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4044 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4045 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4046 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4047 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4048 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4049 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4050 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4051 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4052 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4053 " efer=%016VR{efer}\n"
4054 " pat=%016VR{pat}\n"
4055 " sf_mask=%016VR{sf_mask}\n"
4056 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4057 " lstar=%016VR{lstar}\n"
4058 " star=%016VR{star} cstar=%016VR{cstar}\n"
4059 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4060 );
4061
4062 char szInstr[256];
4063 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4064 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4065 szInstr, sizeof(szInstr), NULL);
4066 Log3(("%s%s\n", szRegs, szInstr));
4067 }
4068#endif /* LOG_ENABLED */
4069
4070 /*
4071 * Call the mode specific worker function.
4072 */
4073 VBOXSTRICTRC rcStrict;
4074 if (!(pCtx->cr0 & X86_CR0_PE))
4075 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4076 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4077 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4078 else
4079 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4080
4081 /*
4082 * Unwind.
4083 */
4084 pIemCpu->cXcptRecursions--;
4085 pIemCpu->uCurXcpt = uPrevXcpt;
4086 pIemCpu->fCurXcpt = fPrevXcpt;
4087 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4088 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4089 return rcStrict;
4090}
4091
4092
4093/** \#DE - 00. */
4094DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4095{
4096 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4097}
4098
4099
4100/** \#DB - 01.
4101 * @note This automatically clears DR7.GD. */
4102DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4103{
4104 /** @todo set/clear RF. */
4105 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4106 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4107}
4108
4109
4110/** \#UD - 06. */
4111DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4112{
4113 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4114}
4115
4116
4117/** \#NM - 07. */
4118DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4119{
4120 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4121}
4122
4123
4124/** \#TS(err) - 0a. */
4125DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4126{
4127 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4128}
4129
4130
4131/** \#TS(tr) - 0a. */
4132DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4133{
4134 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4135 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4136}
4137
4138
4139/** \#TS(0) - 0a. */
4140DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4141{
4142 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4143 0, 0);
4144}
4145
4146
4147/** \#TS(err) - 0a. */
4148DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4149{
4150 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4151 uSel & X86_SEL_MASK_OFF_RPL, 0);
4152}
4153
4154
4155/** \#NP(err) - 0b. */
4156DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4157{
4158 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4159}
4160
4161
4162/** \#NP(seg) - 0b. */
4163DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4164{
4165 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4166 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4167}
4168
4169
4170/** \#NP(sel) - 0b. */
4171DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4172{
4173 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4174 uSel & ~X86_SEL_RPL, 0);
4175}
4176
4177
4178/** \#SS(seg) - 0c. */
4179DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4180{
4181 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4182 uSel & ~X86_SEL_RPL, 0);
4183}
4184
4185
4186/** \#SS(err) - 0c. */
4187DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4188{
4189 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4190}
4191
4192
4193/** \#GP(n) - 0d. */
4194DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4195{
4196 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4197}
4198
4199
4200/** \#GP(0) - 0d. */
4201DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4202{
4203 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4204}
4205
4206
4207/** \#GP(sel) - 0d. */
4208DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4209{
4210 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4211 Sel & ~X86_SEL_RPL, 0);
4212}
4213
4214
4215/** \#GP(0) - 0d. */
4216DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4217{
4218 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4219}
4220
4221
4222/** \#GP(sel) - 0d. */
4223DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4224{
4225 NOREF(iSegReg); NOREF(fAccess);
4226 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4227 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4228}
4229
4230
4231/** \#GP(sel) - 0d. */
4232DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4233{
4234 NOREF(Sel);
4235 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4236}
4237
4238
4239/** \#GP(sel) - 0d. */
4240DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4241{
4242 NOREF(iSegReg); NOREF(fAccess);
4243 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4244}
4245
4246
4247/** \#PF(n) - 0e. */
4248DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4249{
4250 uint16_t uErr;
4251 switch (rc)
4252 {
4253 case VERR_PAGE_NOT_PRESENT:
4254 case VERR_PAGE_TABLE_NOT_PRESENT:
4255 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4256 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4257 uErr = 0;
4258 break;
4259
4260 default:
4261            AssertMsgFailed(("%Rrc\n", rc)); /* deliberate fallthru into the access-denied case */
4262 case VERR_ACCESS_DENIED:
4263 uErr = X86_TRAP_PF_P;
4264 break;
4265
4266 /** @todo reserved */
4267 }
4268
4269 if (pIemCpu->uCpl == 3)
4270 uErr |= X86_TRAP_PF_US;
4271
4272 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4273 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4274 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4275 uErr |= X86_TRAP_PF_ID;
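    /* At this point uErr holds the architectural #PF error code bits gathered so
       far: P (bit 0) for protection violations on present pages, US (bit 2) for
       CPL 3, and ID (bit 4) for instruction fetches with NX paging enabled; the
       W/R bit (bit 1) is added below for write accesses. */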
4276
4277#if 0 /* This is so much non-sense, really. Why was it done like that? */
4278 /* Note! RW access callers reporting a WRITE protection fault, will clear
4279 the READ flag before calling. So, read-modify-write accesses (RW)
4280 can safely be reported as READ faults. */
4281 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4282 uErr |= X86_TRAP_PF_RW;
4283#else
4284 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4285 {
4286 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4287 uErr |= X86_TRAP_PF_RW;
4288 }
4289#endif
4290
4291 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4292 uErr, GCPtrWhere);
4293}
4294
4295
4296/** \#MF(0) - 10. */
4297DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4298{
4299 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4300}
4301
4302
4303/** \#AC(0) - 11. */
4304DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4305{
4306 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4307}
4308
4309
4310/**
4311 * Macro for calling iemCImplRaiseDivideError().
4312 *
4313 * This enables us to add/remove arguments and force different levels of
4314 * inlining as we wish.
4315 *
4316 * @return Strict VBox status code.
4317 */
4318#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4319IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4320{
4321 NOREF(cbInstr);
4322 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4323}
4324
4325
4326/**
4327 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4328 *
4329 * This enables us to add/remove arguments and force different levels of
4330 * inlining as we wish.
4331 *
4332 * @return Strict VBox status code.
4333 */
4334#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4335IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4336{
4337 NOREF(cbInstr);
4338 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4339}
4340
4341
4342/**
4343 * Macro for calling iemCImplRaiseInvalidOpcode().
4344 *
4345 * This enables us to add/remove arguments and force different levels of
4346 * inlining as we wish.
4347 *
4348 * @return Strict VBox status code.
4349 */
4350#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4351IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4352{
4353 NOREF(cbInstr);
4354 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4355}
4356
4357
4358/** @} */
4359
4360
4361/*
4362 *
4363 * Helper routines.
4364 * Helper routines.
4365 * Helper routines.
4366 *
4367 */
4368
4369/**
4370 * Recalculates the effective operand size.
4371 *
4372 * @param pIemCpu The IEM state.
4373 */
4374IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4375{
4376 switch (pIemCpu->enmCpuMode)
4377 {
4378 case IEMMODE_16BIT:
4379 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4380 break;
4381 case IEMMODE_32BIT:
4382 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4383 break;
4384 case IEMMODE_64BIT:
4385 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4386 {
4387 case 0:
4388 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4389 break;
4390 case IEM_OP_PRF_SIZE_OP:
4391 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4392 break;
4393 case IEM_OP_PRF_SIZE_REX_W:
4394 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4395 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4396 break;
4397 }
4398 break;
4399 default:
4400 AssertFailed();
4401 }
4402}
4403
4404
4405/**
4406 * Sets the default operand size to 64-bit and recalculates the effective
4407 * operand size.
4408 *
4409 * @param pIemCpu The IEM state.
4410 */
4411IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4412{
4413 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4414 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4415 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4416 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4417 else
4418 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4419}
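/* Summary of the 64-bit rules implemented by the two helpers above: with no
   prefixes the default size applies (32-bit, or 64-bit for instructions that use
   iemRecalEffOpSize64Default, e.g. near branches and pushes); a 0x66 prefix selects
   16-bit; REX.W selects 64-bit and takes precedence over 0x66. */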
4420
4421
4422/*
4423 *
4424 * Common opcode decoders.
4425 * Common opcode decoders.
4426 * Common opcode decoders.
4427 *
4428 */
4429//#include <iprt/mem.h>
4430
4431/**
4432 * Used to add extra details about a stub case.
4433 * @param pIemCpu The IEM per CPU state.
4434 */
4435IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4436{
4437#if defined(LOG_ENABLED) && defined(IN_RING3)
4438 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4439 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4440 char szRegs[4096];
4441 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4442 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4443 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4444 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4445 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4446 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4447 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4448 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4449 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4450 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4451 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4452 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4453 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4454 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4455 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4456 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4457 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4458 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4459 " efer=%016VR{efer}\n"
4460 " pat=%016VR{pat}\n"
4461 " sf_mask=%016VR{sf_mask}\n"
4462 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4463 " lstar=%016VR{lstar}\n"
4464 " star=%016VR{star} cstar=%016VR{cstar}\n"
4465 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4466 );
4467
4468 char szInstr[256];
4469 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4470 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4471 szInstr, sizeof(szInstr), NULL);
4472
4473 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4474#else
4475    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4476#endif
4477}
4478
4479/**
4480 * Complains about a stub.
4481 *
4482 * Two versions of this macro are provided: one for daily use and one for use when
4483 * working on IEM.
4484 */
4485#if 0
4486# define IEMOP_BITCH_ABOUT_STUB() \
4487 do { \
4488 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4489 iemOpStubMsg2(pIemCpu); \
4490 RTAssertPanic(); \
4491 } while (0)
4492#else
4493# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4494#endif
4495
4496/** Stubs an opcode. */
4497#define FNIEMOP_STUB(a_Name) \
4498 FNIEMOP_DEF(a_Name) \
4499 { \
4500 IEMOP_BITCH_ABOUT_STUB(); \
4501 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4502 } \
4503 typedef int ignore_semicolon
4504
4505/** Stubs an opcode. */
4506#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4507 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4508 { \
4509 IEMOP_BITCH_ABOUT_STUB(); \
4510 NOREF(a_Name0); \
4511 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4512 } \
4513 typedef int ignore_semicolon
4514
4515/** Stubs an opcode which currently should raise \#UD. */
4516#define FNIEMOP_UD_STUB(a_Name) \
4517 FNIEMOP_DEF(a_Name) \
4518 { \
4519 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4520 return IEMOP_RAISE_INVALID_OPCODE(); \
4521 } \
4522 typedef int ignore_semicolon
4523
4524/** Stubs an opcode which currently should raise \#UD. */
4525#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4526 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4527 { \
4528 NOREF(a_Name0); \
4529 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4530 return IEMOP_RAISE_INVALID_OPCODE(); \
4531 } \
4532 typedef int ignore_semicolon
4533
4534
4535
4536/** @name Register Access.
4537 * @{
4538 */
4539
4540/**
4541 * Gets a reference (pointer) to the specified hidden segment register.
4542 *
4543 * @returns Hidden register reference.
4544 * @param pIemCpu The per CPU data.
4545 * @param iSegReg The segment register.
4546 */
4547IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4548{
4549 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4550 PCPUMSELREG pSReg;
4551 switch (iSegReg)
4552 {
4553 case X86_SREG_ES: pSReg = &pCtx->es; break;
4554 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4555 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4556 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4557 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4558 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4559 default:
4560 AssertFailedReturn(NULL);
4561 }
4562#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4563 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4564 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4565#else
4566 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4567#endif
4568 return pSReg;
4569}
4570
4571
4572/**
4573 * Ensures that the given hidden segment register is up to date.
4574 *
4575 * @returns Hidden register reference.
4576 * @param pIemCpu The per CPU data.
4577 * @param pSReg The segment register.
4578 */
4579IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
4580{
4581#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4582 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4583 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4584#else
4585 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4586 NOREF(pIemCpu);
4587#endif
4588 return pSReg;
4589}
4590
4591
4592/**
4593 * Gets a reference (pointer) to the specified segment register (the selector
4594 * value).
4595 *
4596 * @returns Pointer to the selector variable.
4597 * @param pIemCpu The per CPU data.
4598 * @param iSegReg The segment register.
4599 */
4600IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4601{
4602 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4603 switch (iSegReg)
4604 {
4605 case X86_SREG_ES: return &pCtx->es.Sel;
4606 case X86_SREG_CS: return &pCtx->cs.Sel;
4607 case X86_SREG_SS: return &pCtx->ss.Sel;
4608 case X86_SREG_DS: return &pCtx->ds.Sel;
4609 case X86_SREG_FS: return &pCtx->fs.Sel;
4610 case X86_SREG_GS: return &pCtx->gs.Sel;
4611 }
4612 AssertFailedReturn(NULL);
4613}
4614
4615
4616/**
4617 * Fetches the selector value of a segment register.
4618 *
4619 * @returns The selector value.
4620 * @param pIemCpu The per CPU data.
4621 * @param iSegReg The segment register.
4622 */
4623IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4624{
4625 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4626 switch (iSegReg)
4627 {
4628 case X86_SREG_ES: return pCtx->es.Sel;
4629 case X86_SREG_CS: return pCtx->cs.Sel;
4630 case X86_SREG_SS: return pCtx->ss.Sel;
4631 case X86_SREG_DS: return pCtx->ds.Sel;
4632 case X86_SREG_FS: return pCtx->fs.Sel;
4633 case X86_SREG_GS: return pCtx->gs.Sel;
4634 }
4635 AssertFailedReturn(0xffff);
4636}
4637
4638
4639/**
4640 * Gets a reference (pointer) to the specified general register.
4641 *
4642 * @returns Register reference.
4643 * @param pIemCpu The per CPU data.
4644 * @param iReg The general register.
4645 */
4646IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4647{
4648 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4649 switch (iReg)
4650 {
4651 case X86_GREG_xAX: return &pCtx->rax;
4652 case X86_GREG_xCX: return &pCtx->rcx;
4653 case X86_GREG_xDX: return &pCtx->rdx;
4654 case X86_GREG_xBX: return &pCtx->rbx;
4655 case X86_GREG_xSP: return &pCtx->rsp;
4656 case X86_GREG_xBP: return &pCtx->rbp;
4657 case X86_GREG_xSI: return &pCtx->rsi;
4658 case X86_GREG_xDI: return &pCtx->rdi;
4659 case X86_GREG_x8: return &pCtx->r8;
4660 case X86_GREG_x9: return &pCtx->r9;
4661 case X86_GREG_x10: return &pCtx->r10;
4662 case X86_GREG_x11: return &pCtx->r11;
4663 case X86_GREG_x12: return &pCtx->r12;
4664 case X86_GREG_x13: return &pCtx->r13;
4665 case X86_GREG_x14: return &pCtx->r14;
4666 case X86_GREG_x15: return &pCtx->r15;
4667 }
4668 AssertFailedReturn(NULL);
4669}
4670
4671
4672/**
4673 * Gets a reference (pointer) to the specified 8-bit general register.
4674 *
4675 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4676 *
4677 * @returns Register reference.
4678 * @param pIemCpu The per CPU data.
4679 * @param iReg The register.
4680 */
4681IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4682{
4683 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4684 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4685
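    /* Without a REX prefix, encodings 4-7 select AH, CH, DH and BH, i.e. the
       second byte of rAX, rCX, rDX and rBX; the +1 below relies on the
       little-endian register layout of CPUMCTX (x86/AMD64 only). */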
4686 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4687 if (iReg >= 4)
4688 pu8Reg++;
4689 return pu8Reg;
4690}
4691
4692
4693/**
4694 * Fetches the value of an 8-bit general register.
4695 *
4696 * @returns The register value.
4697 * @param pIemCpu The per CPU data.
4698 * @param iReg The register.
4699 */
4700IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4701{
4702 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4703 return *pbSrc;
4704}
4705
4706
4707/**
4708 * Fetches the value of a 16-bit general register.
4709 *
4710 * @returns The register value.
4711 * @param pIemCpu The per CPU data.
4712 * @param iReg The register.
4713 */
4714IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4715{
4716 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4717}
4718
4719
4720/**
4721 * Fetches the value of a 32-bit general register.
4722 *
4723 * @returns The register value.
4724 * @param pIemCpu The per CPU data.
4725 * @param iReg The register.
4726 */
4727IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4728{
4729 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4730}
4731
4732
4733/**
4734 * Fetches the value of a 64-bit general register.
4735 *
4736 * @returns The register value.
4737 * @param pIemCpu The per CPU data.
4738 * @param iReg The register.
4739 */
4740IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4741{
4742 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4743}
4744
4745
4746/**
4747 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4748 *
4749 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4750 * segment limit.
4751 *
 * @returns Strict VBox status code.
4752 * @param pIemCpu The per CPU data.
4753 * @param offNextInstr The offset of the next instruction.
4754 */
4755IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4756{
4757 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4758 switch (pIemCpu->enmEffOpSize)
4759 {
4760 case IEMMODE_16BIT:
4761 {
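            /* offOpcode is the number of opcode bytes decoded so far, i.e. the
               instruction length here, so the displacement is applied relative
               to the first byte of the next instruction. */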
4762 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4763 if ( uNewIp > pCtx->cs.u32Limit
4764 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4765 return iemRaiseGeneralProtectionFault0(pIemCpu);
4766 pCtx->rip = uNewIp;
4767 break;
4768 }
4769
4770 case IEMMODE_32BIT:
4771 {
4772 Assert(pCtx->rip <= UINT32_MAX);
4773 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4774
4775 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4776 if (uNewEip > pCtx->cs.u32Limit)
4777 return iemRaiseGeneralProtectionFault0(pIemCpu);
4778 pCtx->rip = uNewEip;
4779 break;
4780 }
4781
4782 case IEMMODE_64BIT:
4783 {
4784 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4785
4786 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4787 if (!IEM_IS_CANONICAL(uNewRip))
4788 return iemRaiseGeneralProtectionFault0(pIemCpu);
4789 pCtx->rip = uNewRip;
4790 break;
4791 }
4792
4793 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4794 }
4795
4796 pCtx->eflags.Bits.u1RF = 0;
4797 return VINF_SUCCESS;
4798}
4799
4800
4801/**
4802 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4803 *
4804 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4805 * segment limit.
4806 *
4807 * @returns Strict VBox status code.
4808 * @param pIemCpu The per CPU data.
4809 * @param offNextInstr The offset of the next instruction.
4810 */
4811IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4812{
4813 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4814 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4815
4816 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4817 if ( uNewIp > pCtx->cs.u32Limit
4818 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4819 return iemRaiseGeneralProtectionFault0(pIemCpu);
4820 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4821 pCtx->rip = uNewIp;
4822 pCtx->eflags.Bits.u1RF = 0;
4823
4824 return VINF_SUCCESS;
4825}
4826
4827
4828/**
4829 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4830 *
4831 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4832 * segment limit.
4833 *
4834 * @returns Strict VBox status code.
4835 * @param pIemCpu The per CPU data.
4836 * @param offNextInstr The offset of the next instruction.
4837 */
4838IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4839{
4840 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4841 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4842
4843 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4844 {
4845 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4846
4847 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4848 if (uNewEip > pCtx->cs.u32Limit)
4849 return iemRaiseGeneralProtectionFault0(pIemCpu);
4850 pCtx->rip = uNewEip;
4851 }
4852 else
4853 {
4854 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4855
4856 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4857 if (!IEM_IS_CANONICAL(uNewRip))
4858 return iemRaiseGeneralProtectionFault0(pIemCpu);
4859 pCtx->rip = uNewRip;
4860 }
4861 pCtx->eflags.Bits.u1RF = 0;
4862 return VINF_SUCCESS;
4863}
4864
4865
4866/**
4867 * Performs a near jump to the specified address.
4868 *
4869 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4870 * segment limit.
4871 *
 * @returns Strict VBox status code.
4872 * @param pIemCpu The per CPU data.
4873 * @param uNewRip The new RIP value.
4874 */
4875IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4876{
4877 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4878 switch (pIemCpu->enmEffOpSize)
4879 {
4880 case IEMMODE_16BIT:
4881 {
4882 Assert(uNewRip <= UINT16_MAX);
4883 if ( uNewRip > pCtx->cs.u32Limit
4884 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4885 return iemRaiseGeneralProtectionFault0(pIemCpu);
4886 /** @todo Test 16-bit jump in 64-bit mode. */
4887 pCtx->rip = uNewRip;
4888 break;
4889 }
4890
4891 case IEMMODE_32BIT:
4892 {
4893 Assert(uNewRip <= UINT32_MAX);
4894 Assert(pCtx->rip <= UINT32_MAX);
4895 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4896
4897 if (uNewRip > pCtx->cs.u32Limit)
4898 return iemRaiseGeneralProtectionFault0(pIemCpu);
4899 pCtx->rip = uNewRip;
4900 break;
4901 }
4902
4903 case IEMMODE_64BIT:
4904 {
4905 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4906
4907 if (!IEM_IS_CANONICAL(uNewRip))
4908 return iemRaiseGeneralProtectionFault0(pIemCpu);
4909 pCtx->rip = uNewRip;
4910 break;
4911 }
4912
4913 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4914 }
4915
4916 pCtx->eflags.Bits.u1RF = 0;
4917 return VINF_SUCCESS;
4918}
4919
4920
4921/**
4922 * Gets the address of the top of the stack.
4923 *
 * @returns The effective stack pointer (SP/ESP/RSP depending on the current mode).
4924 * @param pIemCpu The per CPU data.
4925 * @param pCtx The CPU context from which SP/ESP/RSP should be
4926 * read.
4927 */
4928DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4929{
4930 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4931 return pCtx->rsp;
4932 if (pCtx->ss.Attr.n.u1DefBig)
4933 return pCtx->esp;
4934 return pCtx->sp;
4935}
4936
4937
4938/**
4939 * Updates the RIP/EIP/IP to point to the next instruction.
4940 *
4941 * This function leaves the EFLAGS.RF flag alone.
4942 *
4943 * @param pIemCpu The per CPU data.
4944 * @param cbInstr The number of bytes to add.
4945 */
4946IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4947{
4948 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4949 switch (pIemCpu->enmCpuMode)
4950 {
4951 case IEMMODE_16BIT:
4952 Assert(pCtx->rip <= UINT16_MAX);
4953 pCtx->eip += cbInstr;
4954 pCtx->eip &= UINT32_C(0xffff);
4955 break;
4956
4957 case IEMMODE_32BIT:
4958 pCtx->eip += cbInstr;
4959 Assert(pCtx->rip <= UINT32_MAX);
4960 break;
4961
4962 case IEMMODE_64BIT:
4963 pCtx->rip += cbInstr;
4964 break;
4965 default: AssertFailed();
4966 }
4967}
4968
4969
4970#if 0
4971/**
4972 * Updates the RIP/EIP/IP to point to the next instruction.
4973 *
4974 * @param pIemCpu The per CPU data.
4975 */
4976IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4977{
4978 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4979}
4980#endif
4981
4982
4983
4984/**
4985 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4986 *
4987 * @param pIemCpu The per CPU data.
4988 * @param cbInstr The number of bytes to add.
4989 */
4990IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4991{
4992 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4993
4994 pCtx->eflags.Bits.u1RF = 0;
4995
4996 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4997 switch (pIemCpu->enmCpuMode)
4998 {
4999 /** @todo investigate if EIP or RIP is really incremented. */
5000 case IEMMODE_16BIT:
5001 case IEMMODE_32BIT:
5002 pCtx->eip += cbInstr;
5003 Assert(pCtx->rip <= UINT32_MAX);
5004 break;
5005
5006 case IEMMODE_64BIT:
5007 pCtx->rip += cbInstr;
5008 break;
5009 default: AssertFailed();
5010 }
5011}
5012
5013
5014/**
5015 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
5016 *
5017 * @param pIemCpu The per CPU data.
5018 */
5019IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
5020{
5021 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
5022}
5023
5024
5025/**
5026 * Adds to the stack pointer.
5027 *
5028 * @param pIemCpu The per CPU data.
5029 * @param pCtx The CPU context which SP/ESP/RSP should be
5030 * updated.
5031 * @param cbToAdd The number of bytes to add (8-bit!).
5032 */
5033DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
5034{
5035 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5036 pCtx->rsp += cbToAdd;
5037 else if (pCtx->ss.Attr.n.u1DefBig)
5038 pCtx->esp += cbToAdd;
5039 else
5040 pCtx->sp += cbToAdd;
5041}
5042
5043
5044/**
5045 * Subtracts from the stack pointer.
5046 *
5047 * @param pIemCpu The per CPU data.
5048 * @param pCtx The CPU context which SP/ESP/RSP should be
5049 * updated.
5050 * @param cbToSub The number of bytes to subtract (8-bit!).
5051 */
5052DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
5053{
5054 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5055 pCtx->rsp -= cbToSub;
5056 else if (pCtx->ss.Attr.n.u1DefBig)
5057 pCtx->esp -= cbToSub;
5058 else
5059 pCtx->sp -= cbToSub;
5060}
5061
5062
5063/**
5064 * Adds to the temporary stack pointer.
5065 *
5066 * @param pIemCpu The per CPU data.
5067 * @param pCtx Where to get the current stack mode.
5068 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5069 * @param cbToAdd The number of bytes to add (16-bit).
5070 */
5071DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
5072{
5073 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5074 pTmpRsp->u += cbToAdd;
5075 else if (pCtx->ss.Attr.n.u1DefBig)
5076 pTmpRsp->DWords.dw0 += cbToAdd;
5077 else
5078 pTmpRsp->Words.w0 += cbToAdd;
5079}
5080
5081
5082/**
5083 * Subtracts from the temporary stack pointer.
5084 *
5085 * @param pIemCpu The per CPU data.
5086 * @param pCtx Where to get the current stack mode.
5087 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5088 * @param cbToSub The number of bytes to subtract.
5089 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5090 * expecting that.
5091 */
5092DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5093{
5094 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5095 pTmpRsp->u -= cbToSub;
5096 else if (pCtx->ss.Attr.n.u1DefBig)
5097 pTmpRsp->DWords.dw0 -= cbToSub;
5098 else
5099 pTmpRsp->Words.w0 -= cbToSub;
5100}
5101
5102
5103/**
5104 * Calculates the effective stack address for a push of the specified size as
5105 * well as the new RSP value (upper bits may be masked).
5106 *
5107 * @returns Effective stack address for the push.
5108 * @param pIemCpu The IEM per CPU data.
5109 * @param pCtx Where to get the current stack mode.
5110 * @param cbItem The size of the stack item to push.
5111 * @param puNewRsp Where to return the new RSP value.
5112 */
5113DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5114{
5115 RTUINT64U uTmpRsp;
5116 RTGCPTR GCPtrTop;
5117 uTmpRsp.u = pCtx->rsp;
5118
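    /* The push address is the stack pointer after the decrement; only the part
       of RSP selected by the current stack attributes (SP, ESP or RSP) is changed. */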
5119 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5120 GCPtrTop = uTmpRsp.u -= cbItem;
5121 else if (pCtx->ss.Attr.n.u1DefBig)
5122 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5123 else
5124 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5125 *puNewRsp = uTmpRsp.u;
5126 return GCPtrTop;
5127}
5128
5129
5130/**
5131 * Gets the current stack pointer and calculates the value after a pop of the
5132 * specified size.
5133 *
5134 * @returns Current stack pointer.
5135 * @param pIemCpu The per CPU data.
5136 * @param pCtx Where to get the current stack mode.
5137 * @param cbItem The size of the stack item to pop.
5138 * @param puNewRsp Where to return the new RSP value.
5139 */
5140DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5141{
5142 RTUINT64U uTmpRsp;
5143 RTGCPTR GCPtrTop;
5144 uTmpRsp.u = pCtx->rsp;
5145
5146 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5147 {
5148 GCPtrTop = uTmpRsp.u;
5149 uTmpRsp.u += cbItem;
5150 }
5151 else if (pCtx->ss.Attr.n.u1DefBig)
5152 {
5153 GCPtrTop = uTmpRsp.DWords.dw0;
5154 uTmpRsp.DWords.dw0 += cbItem;
5155 }
5156 else
5157 {
5158 GCPtrTop = uTmpRsp.Words.w0;
5159 uTmpRsp.Words.w0 += cbItem;
5160 }
5161 *puNewRsp = uTmpRsp.u;
5162 return GCPtrTop;
5163}
5164
5165
5166/**
5167 * Calculates the effective stack address for a push of the specified size as
5168 * well as the new temporary RSP value (upper bits may be masked).
5169 *
5170 * @returns Effective stack address for the push.
5171 * @param pIemCpu The per CPU data.
5172 * @param pCtx Where to get the current stack mode.
5173 * @param pTmpRsp The temporary stack pointer. This is updated.
5174 * @param cbItem The size of the stack item to push.
5175 */
5176DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5177{
5178 RTGCPTR GCPtrTop;
5179
5180 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5181 GCPtrTop = pTmpRsp->u -= cbItem;
5182 else if (pCtx->ss.Attr.n.u1DefBig)
5183 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5184 else
5185 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5186 return GCPtrTop;
5187}
5188
5189
5190/**
5191 * Gets the effective stack address for a pop of the specified size and
5192 * calculates and updates the temporary RSP.
5193 *
5194 * @returns Current stack pointer.
5195 * @param pIemCpu The per CPU data.
5196 * @param pCtx Where to get the current stack mode.
5197 * @param pTmpRsp The temporary stack pointer. This is updated.
5198 * @param cbItem The size of the stack item to pop.
5199 */
5200DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5201{
5202 RTGCPTR GCPtrTop;
5203 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5204 {
5205 GCPtrTop = pTmpRsp->u;
5206 pTmpRsp->u += cbItem;
5207 }
5208 else if (pCtx->ss.Attr.n.u1DefBig)
5209 {
5210 GCPtrTop = pTmpRsp->DWords.dw0;
5211 pTmpRsp->DWords.dw0 += cbItem;
5212 }
5213 else
5214 {
5215 GCPtrTop = pTmpRsp->Words.w0;
5216 pTmpRsp->Words.w0 += cbItem;
5217 }
5218 return GCPtrTop;
5219}
5220
5221/** @} */
5222
5223
5224/** @name FPU access and helpers.
5225 *
5226 * @{
5227 */
5228
5229
5230/**
5231 * Hook for preparing to use the host FPU.
5232 *
5233 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5234 *
5235 * @param pIemCpu The IEM per CPU data.
5236 */
5237DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5238{
5239#ifdef IN_RING3
5240 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
5241#else
5242 CPUMRZFpuStatePrepareHostCpuForUse(IEMCPU_TO_VMCPU(pIemCpu));
5243#endif
5244}
5245
5246
5247/**
5248 * Hook for preparing to use the host FPU for SSE.
5249 *
5250 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5251 *
5252 * @param pIemCpu The IEM per CPU data.
5253 */
5254DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5255{
5256 iemFpuPrepareUsage(pIemCpu);
5257}
5258
5259
5260/**
5261 * Hook for actualizing the guest FPU state before the interpreter reads it.
5262 *
5263 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5264 *
5265 * @param pIemCpu The IEM per CPU data.
5266 */
5267DECLINLINE(void) iemFpuActualizeStateForRead(PIEMCPU pIemCpu)
5268{
5269#ifdef IN_RING3
5270 NOREF(pIemCpu);
5271#else
5272 CPUMRZFpuStateActualizeForRead(IEMCPU_TO_VMCPU(pIemCpu));
5273#endif
5274}
5275
5276
5277/**
5278 * Hook for actualizing the guest FPU state before the interpreter changes it.
5279 *
5280 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5281 *
5282 * @param pIemCpu The IEM per CPU data.
5283 */
5284DECLINLINE(void) iemFpuActualizeStateForChange(PIEMCPU pIemCpu)
5285{
5286#ifdef IN_RING3
5287 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
5288#else
5289 CPUMRZFpuStateActualizeForChange(IEMCPU_TO_VMCPU(pIemCpu));
5290#endif
5291}
5292
5293
5294/**
5295 * Hook for actualizing the guest XMM0..15 register state for read only.
5296 *
5297 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5298 *
5299 * @param pIemCpu The IEM per CPU data.
5300 */
5301DECLINLINE(void) iemFpuActualizeSseStateForRead(PIEMCPU pIemCpu)
5302{
5303#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
5304 NOREF(pIemCpu);
5305#else
5306 CPUMRZFpuStateActualizeSseForRead(IEMCPU_TO_VMCPU(pIemCpu));
5307#endif
5308}
5309
5310
5311/**
5312 * Hook for actualizing the guest XMM0..15 register state for read+write.
5313 *
5314 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
5315 *
5316 * @param pIemCpu The IEM per CPU data.
5317 */
5318DECLINLINE(void) iemFpuActualizeSseStateForChange(PIEMCPU pIemCpu)
5319{
5320#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
5321 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
5322#else
5323 CPUMRZFpuStateActualizeForChange(IEMCPU_TO_VMCPU(pIemCpu));
5324#endif
5325}
5326
5327
5328/**
5329 * Stores a QNaN value into a FPU register.
5330 *
5331 * @param pReg Pointer to the register.
5332 */
5333DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5334{
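    /* This is the x87 "indefinite" QNaN: sign=1, exponent=0x7fff (au16[4]
       covers both), mantissa=0xC000000000000000. */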
5335 pReg->au32[0] = UINT32_C(0x00000000);
5336 pReg->au32[1] = UINT32_C(0xc0000000);
5337 pReg->au16[4] = UINT16_C(0xffff);
5338}
5339
5340
5341/**
5342 * Updates the FOP, FPU.CS and FPUIP registers.
5343 *
5344 * @param pIemCpu The IEM per CPU data.
5345 * @param pCtx The CPU context.
5346 * @param pFpuCtx The FPU context.
5347 */
5348DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5349{
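    /* FOP is the 11-bit x87 opcode: bits 0-7 come from the ModR/M byte and
       bits 8-10 from the low three bits of the 0xD8..0xDF escape byte. */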
5350 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5351 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5352 /** @todo x87.CS and FPUIP need to be kept separately. */
5353 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5354 {
5355 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5356 * happens in real mode here based on the fnsave and fnstenv images. */
5357 pFpuCtx->CS = 0;
5358 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5359 }
5360 else
5361 {
5362 pFpuCtx->CS = pCtx->cs.Sel;
5363 pFpuCtx->FPUIP = pCtx->rip;
5364 }
5365}
5366
5367
5368/**
5369 * Updates the x87.DS and FPUDP registers.
5370 *
5371 * @param pIemCpu The IEM per CPU data.
5372 * @param pCtx The CPU context.
5373 * @param pFpuCtx The FPU context.
5374 * @param iEffSeg The effective segment register.
5375 * @param GCPtrEff The effective address relative to @a iEffSeg.
5376 */
5377DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5378{
5379 RTSEL sel;
5380 switch (iEffSeg)
5381 {
5382 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5383 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5384 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5385 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5386 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5387 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5388 default:
5389 AssertMsgFailed(("%d\n", iEffSeg));
5390 sel = pCtx->ds.Sel;
5391 }
5392 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5393 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5394 {
5395 pFpuCtx->DS = 0;
5396 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5397 }
5398 else
5399 {
5400 pFpuCtx->DS = sel;
5401 pFpuCtx->FPUDP = GCPtrEff;
5402 }
5403}
5404
5405
5406/**
5407 * Rotates the stack registers in the push direction.
5408 *
5409 * @param pFpuCtx The FPU context.
5410 * @remarks This is a complete waste of time, but fxsave stores the registers in
5411 * stack order.
5412 */
5413DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5414{
5415 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5416 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5417 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5418 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5419 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5420 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5421 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5422 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5423 pFpuCtx->aRegs[0].r80 = r80Tmp;
5424}
5425
5426
5427/**
5428 * Rotates the stack registers in the pop direction.
5429 *
5430 * @param pFpuCtx The FPU context.
5431 * @remarks This is a complete waste of time, but fxsave stores the registers in
5432 * stack order.
5433 */
5434DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5435{
5436 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5437 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5438 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5439 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5440 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5441 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5442 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5443 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5444 pFpuCtx->aRegs[7].r80 = r80Tmp;
5445}
5446
5447
5448/**
5449 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5450 * exception prevents it.
5451 *
5452 * @param pIemCpu The IEM per CPU data.
5453 * @param pResult The FPU operation result to push.
5454 * @param pFpuCtx The FPU context.
5455 */
5456IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5457{
5458 /* Update FSW and bail if there are pending exceptions afterwards. */
5459 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5460 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5461 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5462 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5463 {
5464 pFpuCtx->FSW = fFsw;
5465 return;
5466 }
5467
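    /* A push decrements TOP; adding 7 is the same as -1 modulo the 3-bit TOP field. */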
5468 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5469 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5470 {
5471 /* All is fine, push the actual value. */
5472 pFpuCtx->FTW |= RT_BIT(iNewTop);
5473 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5474 }
5475 else if (pFpuCtx->FCW & X86_FCW_IM)
5476 {
5477 /* Masked stack overflow, push QNaN. */
5478 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5479 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5480 }
5481 else
5482 {
5483 /* Raise stack overflow, don't push anything. */
5484 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5485 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5486 return;
5487 }
5488
5489 fFsw &= ~X86_FSW_TOP_MASK;
5490 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5491 pFpuCtx->FSW = fFsw;
5492
5493 iemFpuRotateStackPush(pFpuCtx);
5494}
5495
5496
5497/**
5498 * Stores a result in a FPU register and updates the FSW and FTW.
5499 *
5500 * @param pFpuCtx The FPU context.
5501 * @param pResult The result to store.
5502 * @param iStReg Which FPU register to store it in.
5503 */
5504IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5505{
5506 Assert(iStReg < 8);
5507 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5508 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5509 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5510 pFpuCtx->FTW |= RT_BIT(iReg);
5511 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5512}
5513
5514
5515/**
5516 * Only updates the FPU status word (FSW) with the result of the current
5517 * instruction.
5518 *
5519 * @param pFpuCtx The FPU context.
5520 * @param u16FSW The FSW output of the current instruction.
5521 */
5522IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5523{
5524 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5525 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5526}
5527
5528
5529/**
5530 * Pops one item off the FPU stack if no pending exception prevents it.
5531 *
5532 * @param pFpuCtx The FPU context.
5533 */
5534IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5535{
5536 /* Check pending exceptions. */
5537 uint16_t uFSW = pFpuCtx->FSW;
5538 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5539 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5540 return;
5541
5542 /* TOP++, i.e. pop one item; adding 9 is the same as +1 modulo the 3-bit TOP field. */
5543 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5544 uFSW &= ~X86_FSW_TOP_MASK;
5545 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5546 pFpuCtx->FSW = uFSW;
5547
5548 /* Mark the previous ST0 as empty. */
5549 iOldTop >>= X86_FSW_TOP_SHIFT;
5550 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5551
5552 /* Rotate the registers. */
5553 iemFpuRotateStackPop(pFpuCtx);
5554}
5555
5556
5557/**
5558 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5559 *
5560 * @param pIemCpu The IEM per CPU data.
5561 * @param pResult The FPU operation result to push.
5562 */
5563IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5564{
5565 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5566 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5567 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5568 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5569}
5570
5571
5572/**
5573 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5574 * and sets FPUDP and FPUDS.
5575 *
5576 * @param pIemCpu The IEM per CPU data.
5577 * @param pResult The FPU operation result to push.
5578 * @param iEffSeg The effective segment register.
5579 * @param GCPtrEff The effective address relative to @a iEffSeg.
5580 */
5581IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5582{
5583 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5584 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5585 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5586 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5587 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5588}
5589
5590
5591/**
5592 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5593 * unless a pending exception prevents it.
5594 *
5595 * @param pIemCpu The IEM per CPU data.
5596 * @param pResult The FPU operation result to store and push.
5597 */
5598IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5599{
5600 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5601 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5602 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5603
5604 /* Update FSW and bail if there are pending exceptions afterwards. */
5605 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5606 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5607 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5608 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5609 {
5610 pFpuCtx->FSW = fFsw;
5611 return;
5612 }
5613
5614 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5615 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5616 {
5617 /* All is fine, push the actual value. */
5618 pFpuCtx->FTW |= RT_BIT(iNewTop);
5619 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5620 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5621 }
5622 else if (pFpuCtx->FCW & X86_FCW_IM)
5623 {
5624 /* Masked stack overflow, push QNaN. */
5625 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5626 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5627 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5628 }
5629 else
5630 {
5631 /* Raise stack overflow, don't push anything. */
5632 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5633 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5634 return;
5635 }
5636
5637 fFsw &= ~X86_FSW_TOP_MASK;
5638 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5639 pFpuCtx->FSW = fFsw;
5640
5641 iemFpuRotateStackPush(pFpuCtx);
5642}
5643
5644
5645/**
5646 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5647 * FOP.
5648 *
5649 * @param pIemCpu The IEM per CPU data.
5650 * @param pResult The result to store.
5651 * @param iStReg Which FPU register to store it in.
5652 */
5653IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5654{
5655 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5656 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5657 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5658 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5659}
5660
5661
5662/**
5663 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5664 * FOP, and then pops the stack.
5665 *
5666 * @param pIemCpu The IEM per CPU data.
5667 * @param pResult The result to store.
5668 * @param iStReg Which FPU register to store it in.
5669 */
5670IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5671{
5672 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5673 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5674 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5675 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5676 iemFpuMaybePopOne(pFpuCtx);
5677}
5678
5679
5680/**
5681 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5682 * FPUDP, and FPUDS.
5683 *
5684 * @param pIemCpu The IEM per CPU data.
5685 * @param pResult The result to store.
5686 * @param iStReg Which FPU register to store it in.
5687 * @param iEffSeg The effective memory operand selector register.
5688 * @param GCPtrEff The effective memory operand offset.
5689 */
5690IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5691 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5692{
5693 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5694 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5695 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5696 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5697 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5698}
5699
5700
5701/**
5702 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5703 * FPUDP, and FPUDS, and then pops the stack.
5704 *
5705 * @param pIemCpu The IEM per CPU data.
5706 * @param pResult The result to store.
5707 * @param iStReg Which FPU register to store it in.
5708 * @param iEffSeg The effective memory operand selector register.
5709 * @param GCPtrEff The effective memory operand offset.
5710 */
5711IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5712 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5713{
5714 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5715 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5716 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5717 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5718 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5719 iemFpuMaybePopOne(pFpuCtx);
5720}
5721
5722
5723/**
5724 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5725 *
5726 * @param pIemCpu The IEM per CPU data.
5727 */
5728IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5729{
5730 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5731 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5732 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5733}
5734
5735
5736/**
5737 * Marks the specified stack register as free (for FFREE).
5738 *
5739 * @param pIemCpu The IEM per CPU data.
5740 * @param iStReg The register to free.
5741 */
5742IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5743{
5744 Assert(iStReg < 8);
5745 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5746 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5747 pFpuCtx->FTW &= ~RT_BIT(iReg);
5748}
5749
5750
5751/**
5752 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5753 *
5754 * @param pIemCpu The IEM per CPU data.
5755 */
5756IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5757{
5758 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5759 uint16_t uFsw = pFpuCtx->FSW;
5760 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5761 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5762 uFsw &= ~X86_FSW_TOP_MASK;
5763 uFsw |= uTop;
5764 pFpuCtx->FSW = uFsw;
5765}
5766
5767
5768/**
5769 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5770 *
5771 * @param pIemCpu The IEM per CPU data.
5772 */
5773IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5774{
5775 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5776 uint16_t uFsw = pFpuCtx->FSW;
5777 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
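    /* Adding 7 is -1 modulo 8, i.e. this decrements the 3-bit TOP field with wrap-around. */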
5778 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5779 uFsw &= ~X86_FSW_TOP_MASK;
5780 uFsw |= uTop;
5781 pFpuCtx->FSW = uFsw;
5782}
5783
5784
5785/**
5786 * Updates the FSW, FOP, FPUIP, and FPUCS.
5787 *
5788 * @param pIemCpu The IEM per CPU data.
5789 * @param u16FSW The FSW from the current instruction.
5790 */
5791IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5792{
5793 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5794 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5795 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5796 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5797}
5798
5799
5800/**
5801 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5802 *
5803 * @param pIemCpu The IEM per CPU data.
5804 * @param u16FSW The FSW from the current instruction.
5805 */
5806IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5807{
5808 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5809 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5810 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5811 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5812 iemFpuMaybePopOne(pFpuCtx);
5813}
5814
5815
5816/**
5817 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5818 *
5819 * @param pIemCpu The IEM per CPU data.
5820 * @param u16FSW The FSW from the current instruction.
5821 * @param iEffSeg The effective memory operand selector register.
5822 * @param GCPtrEff The effective memory operand offset.
5823 */
5824IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5825{
5826 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5827 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5828 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5829 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5830 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5831}
5832
5833
5834/**
5835 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5836 *
5837 * @param pIemCpu The IEM per CPU data.
5838 * @param u16FSW The FSW from the current instruction.
5839 */
5840IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5841{
5842 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5843 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5844 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5845 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5846 iemFpuMaybePopOne(pFpuCtx);
5847 iemFpuMaybePopOne(pFpuCtx);
5848}
5849
5850
5851/**
5852 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5853 *
5854 * @param pIemCpu The IEM per CPU data.
5855 * @param u16FSW The FSW from the current instruction.
5856 * @param iEffSeg The effective memory operand selector register.
5857 * @param GCPtrEff The effective memory operand offset.
5858 */
5859IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5860{
5861 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5862 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5863 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5864 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5865 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5866 iemFpuMaybePopOne(pFpuCtx);
5867}
5868
5869
5870/**
5871 * Worker routine for raising an FPU stack underflow exception.
5872 *
5873 * @param pIemCpu The IEM per CPU data.
5874 * @param pFpuCtx The FPU context.
5875 * @param iStReg The stack register being accessed.
5876 */
5877IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5878{
5879 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5880 if (pFpuCtx->FCW & X86_FCW_IM)
5881 {
5882 /* Masked underflow. */
5883 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5884 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5885 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5886 if (iStReg != UINT8_MAX)
5887 {
5888 pFpuCtx->FTW |= RT_BIT(iReg);
5889 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5890 }
5891 }
5892 else
5893 {
5894 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5895 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5896 }
5897}
5898
5899
5900/**
5901 * Raises a FPU stack underflow exception.
5902 *
5903 * @param pIemCpu The IEM per CPU data.
5904 * @param iStReg The destination register that should be loaded
5905 * with QNaN if \#IS is not masked. Specify
5906 * UINT8_MAX if none (like for fcom).
5907 */
5908DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5909{
5910 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5911 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5912 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5913 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5914}
5915
5916
5917DECL_NO_INLINE(IEM_STATIC, void)
5918iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5919{
5920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5921 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5922 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5923 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5924 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5925}
5926
5927
5928DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5929{
5930 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5931 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5932 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5933 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5934 iemFpuMaybePopOne(pFpuCtx);
5935}
5936
5937
5938DECL_NO_INLINE(IEM_STATIC, void)
5939iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5940{
5941 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5942 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5943 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5944 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5945 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5946 iemFpuMaybePopOne(pFpuCtx);
5947}
5948
5949
5950DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5951{
5952 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5953 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5954 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5955 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5956 iemFpuMaybePopOne(pFpuCtx);
5957 iemFpuMaybePopOne(pFpuCtx);
5958}
5959
5960
5961DECL_NO_INLINE(IEM_STATIC, void)
5962iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5963{
5964 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5965 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5966 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5967
5968 if (pFpuCtx->FCW & X86_FCW_IM)
5969 {
5970 /* Masked underflow - push QNaN. */
5971 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5972 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5973 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5974 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5975 pFpuCtx->FTW |= RT_BIT(iNewTop);
5976 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5977 iemFpuRotateStackPush(pFpuCtx);
5978 }
5979 else
5980 {
5981 /* Exception pending - don't change TOP or the register stack. */
5982 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5983 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5984 }
5985}
5986
5987
5988DECL_NO_INLINE(IEM_STATIC, void)
5989iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5990{
5991 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5992 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5993 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5994
5995 if (pFpuCtx->FCW & X86_FCW_IM)
5996 {
5997 /* Masked underflow - push QNaN. */
5998 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5999 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6000 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6001 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6002 pFpuCtx->FTW |= RT_BIT(iNewTop);
6003 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6004 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6005 iemFpuRotateStackPush(pFpuCtx);
6006 }
6007 else
6008 {
6009 /* Exception pending - don't change TOP or the register stack. */
6010 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6011 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6012 }
6013}
6014
6015
6016/**
6017 * Worker routine for raising an FPU stack overflow exception on a push.
6018 *
6019 * @param pFpuCtx The FPU context.
6020 */
6021IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
6022{
6023 if (pFpuCtx->FCW & X86_FCW_IM)
6024 {
6025 /* Masked overflow. */
6026 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6027 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6028 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
6029 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6030 pFpuCtx->FTW |= RT_BIT(iNewTop);
6031 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6032 iemFpuRotateStackPush(pFpuCtx);
6033 }
6034 else
6035 {
6036 /* Exception pending - don't change TOP or the register stack. */
6037 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6038 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6039 }
6040}
6041
6042
6043/**
6044 * Raises a FPU stack overflow exception on a push.
6045 *
6046 * @param pIemCpu The IEM per CPU data.
6047 */
6048DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
6049{
6050 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6051 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6052 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6053 iemFpuStackPushOverflowOnly(pFpuCtx);
6054}
6055
6056
6057/**
6058 * Raises a FPU stack overflow exception on a push with a memory operand.
6059 *
6060 * @param pIemCpu The IEM per CPU data.
6061 * @param iEffSeg The effective memory operand selector register.
6062 * @param GCPtrEff The effective memory operand offset.
6063 */
6064DECL_NO_INLINE(IEM_STATIC, void)
6065iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6066{
6067 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6068 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6069 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6070 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6071 iemFpuStackPushOverflowOnly(pFpuCtx);
6072}
6073
6074
6075IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
6076{
6077 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6078 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6079 if (pFpuCtx->FTW & RT_BIT(iReg))
6080 return VINF_SUCCESS;
6081 return VERR_NOT_FOUND;
6082}
6083
6084
6085IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
6086{
6087 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6088 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6089 if (pFpuCtx->FTW & RT_BIT(iReg))
6090 {
6091 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
6092 return VINF_SUCCESS;
6093 }
6094 return VERR_NOT_FOUND;
6095}
6096
6097
6098IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
6099 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
6100{
6101 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6102 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6103 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6104 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6105 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6106 {
6107 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6108 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
6109 return VINF_SUCCESS;
6110 }
6111 return VERR_NOT_FOUND;
6112}
6113
6114
6115IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
6116{
6117 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6118 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6119 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6120 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6121 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6122 {
6123 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6124 return VINF_SUCCESS;
6125 }
6126 return VERR_NOT_FOUND;
6127}
6128
6129
6130/**
6131 * Updates the FPU exception status after FCW is changed.
6132 *
6133 * @param pFpuCtx The FPU context.
6134 */
6135IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
6136{
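    /* Set the exception summary (ES) and busy (B) bits if any unmasked exception
       is now pending, otherwise clear them. */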
6137 uint16_t u16Fsw = pFpuCtx->FSW;
6138 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
6139 u16Fsw |= X86_FSW_ES | X86_FSW_B;
6140 else
6141 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
6142 pFpuCtx->FSW = u16Fsw;
6143}
6144
6145
6146/**
6147 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6148 *
6149 * @returns The full FTW.
6150 * @param pFpuCtx The FPU context.
6151 */
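/* Worked example (assumed values, for illustration only): with TOP=6 and only
   ST(0) and ST(1) holding valid values (physical registers 6 and 7), the
   abridged FTW is 0xC0 and the full FTW computed below is 0x0FFF. */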
6152IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6153{
6154 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6155 uint16_t u16Ftw = 0;
6156 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6157 for (unsigned iSt = 0; iSt < 8; iSt++)
6158 {
6159 unsigned const iReg = (iSt + iTop) & 7;
6160 if (!(u8Ftw & RT_BIT(iReg)))
6161 u16Ftw |= 3 << (iReg * 2); /* empty */
6162 else
6163 {
6164 uint16_t uTag;
6165 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6166 if (pr80Reg->s.uExponent == 0x7fff)
6167 uTag = 2; /* Exponent is all 1's => Special. */
6168 else if (pr80Reg->s.uExponent == 0x0000)
6169 {
6170 if (pr80Reg->s.u64Mantissa == 0x0000)
6171 uTag = 1; /* All bits are zero => Zero. */
6172 else
6173 uTag = 2; /* Must be special. */
6174 }
6175 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6176 uTag = 0; /* Valid. */
6177 else
6178 uTag = 2; /* Must be special. */
6179
6180 u16Ftw |= uTag << (iReg * 2);
6181 }
6182 }
6183
6184 return u16Ftw;
6185}
6186
6187
6188/**
6189 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6190 *
6191 * @returns The compressed FTW.
6192 * @param u16FullFtw The full FTW to convert.
6193 */
6194IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6195{
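    /* Each register's 2-bit tag collapses to a single bit: 1 = in use
       (tag 00, 01 or 10), 0 = empty (tag 11). */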
6196 uint8_t u8Ftw = 0;
6197 for (unsigned i = 0; i < 8; i++)
6198 {
6199 if ((u16FullFtw & 3) != 3 /*empty*/)
6200 u8Ftw |= RT_BIT(i);
6201 u16FullFtw >>= 2;
6202 }
6203
6204 return u8Ftw;
6205}
6206
6207/** @} */
6208
6209
6210/** @name Memory access.
6211 *
6212 * @{
6213 */
6214
6215
6216/**
6217 * Updates the IEMCPU::cbWritten counter if applicable.
6218 *
6219 * @param pIemCpu The IEM per CPU data.
6220 * @param fAccess The access being accounted for.
6221 * @param cbMem The access size.
6222 */
6223DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6224{
6225 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6226 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6227 pIemCpu->cbWritten += (uint32_t)cbMem;
6228}
6229
6230
6231/**
6232 * Checks if the given segment can be written to, raising the appropriate
6233 * exception if not.
6234 *
6235 * @returns VBox strict status code.
6236 *
6237 * @param pIemCpu The IEM per CPU data.
6238 * @param pHid Pointer to the hidden register.
6239 * @param iSegReg The register number.
6240 * @param pu64BaseAddr Where to return the base address to use for the
6241 * segment. (In 64-bit code it may differ from the
6242 * base in the hidden segment.)
6243 */
6244IEM_STATIC VBOXSTRICTRC
6245iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6246{
6247 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6248 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6249 else
6250 {
6251 if (!pHid->Attr.n.u1Present)
6252 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6253
6254 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6255 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6256 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6257 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6258 *pu64BaseAddr = pHid->u64Base;
6259 }
6260 return VINF_SUCCESS;
6261}
6262
6263
6264/**
6265 * Checks if the given segment can be read from, raising the appropriate
6266 * exception if not.
6267 *
6268 * @returns VBox strict status code.
6269 *
6270 * @param pIemCpu The IEM per CPU data.
6271 * @param pHid Pointer to the hidden register.
6272 * @param iSegReg The register number.
6273 * @param pu64BaseAddr Where to return the base address to use for the
6274 * segment. (In 64-bit code it may differ from the
6275 * base in the hidden segment.)
6276 */
6277IEM_STATIC VBOXSTRICTRC
6278iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6279{
6280 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6281 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6282 else
6283 {
6284 if (!pHid->Attr.n.u1Present)
6285 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6286
6287 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6288 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6289 *pu64BaseAddr = pHid->u64Base;
6290 }
6291 return VINF_SUCCESS;
6292}
6293
6294
6295/**
6296 * Applies the segment limit, base and attributes.
6297 *
6298 * This may raise a \#GP or \#SS.
6299 *
6300 * @returns VBox strict status code.
6301 *
6302 * @param pIemCpu The IEM per CPU data.
6303 * @param fAccess The kind of access which is being performed.
6304 * @param iSegReg The index of the segment register to apply.
6305 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6306 * TSS, ++).
6307 * @param cbMem The access size.
6308 * @param pGCPtrMem Pointer to the guest memory address to apply
6309 * segmentation to. Input and output parameter.
6310 */
6311IEM_STATIC VBOXSTRICTRC
6312iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6313{
6314 if (iSegReg == UINT8_MAX)
6315 return VINF_SUCCESS;
6316
6317 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6318 switch (pIemCpu->enmCpuMode)
6319 {
6320 case IEMMODE_16BIT:
6321 case IEMMODE_32BIT:
6322 {
6323 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6324 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6325
6326 if ( pSel->Attr.n.u1Present
6327 && !pSel->Attr.n.u1Unusable)
6328 {
6329 Assert(pSel->Attr.n.u1DescType);
6330 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6331 {
6332 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6333 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6334 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6335
6336 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6337 {
6338 /** @todo CPL check. */
6339 }
6340
6341 /*
6342 * There are two kinds of data selectors, normal and expand down.
6343 */
6344 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6345 {
6346 if ( GCPtrFirst32 > pSel->u32Limit
6347 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6348 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6349 }
6350 else
6351 {
6352 /*
6353 * The upper boundary is defined by the B bit, not the G bit!
6354 */
6355 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6356 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6357 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6358 }
6359 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6360 }
6361 else
6362 {
6363
6364 /*
6365 * Code selectors can usually be used to read through; writing is
6366 * only permitted in real and V8086 mode.
6367 */
6368 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6369 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6370 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6371 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6372 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6373
6374 if ( GCPtrFirst32 > pSel->u32Limit
6375 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6376 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6377
6378 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6379 {
6380 /** @todo CPL check. */
6381 }
6382
6383 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6384 }
6385 }
6386 else
6387 return iemRaiseGeneralProtectionFault0(pIemCpu);
6388 return VINF_SUCCESS;
6389 }
6390
6391 case IEMMODE_64BIT:
6392 {
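            /* In 64-bit mode only FS and GS have a non-zero base; segment limit
               checks are replaced by the canonical address check below. */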
6393 RTGCPTR GCPtrMem = *pGCPtrMem;
6394 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6395 *pGCPtrMem = GCPtrMem + pSel->u64Base;
6396
6397 Assert(cbMem >= 1);
6398 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
6399 return VINF_SUCCESS;
6400 return iemRaiseGeneralProtectionFault0(pIemCpu);
6401 }
6402
6403 default:
6404 AssertFailedReturn(VERR_IEM_IPE_7);
6405 }
6406}
6407
6408
6409/**
6410 * Translates a virtual address to a physical address and checks if we
6411 * can access the page as specified.
6412 *
6413 * @param pIemCpu The IEM per CPU data.
6414 * @param GCPtrMem The virtual address.
6415 * @param fAccess The intended access.
6416 * @param pGCPhysMem Where to return the physical address.
6417 */
6418IEM_STATIC VBOXSTRICTRC
6419iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6420{
6421 /** @todo Need a different PGM interface here. We're currently using
6422 * generic / REM interfaces. This won't cut it for R0 & RC. */
6423 RTGCPHYS GCPhys;
6424 uint64_t fFlags;
6425 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6426 if (RT_FAILURE(rc))
6427 {
6428 /** @todo Check unassigned memory in unpaged mode. */
6429 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6430 *pGCPhysMem = NIL_RTGCPHYS;
6431 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6432 }
6433
6434 /* If the page is writable, user-accessible and does not have the no-exec bit set, all
6435 access is allowed. Otherwise we'll have to check more carefully... */
6436 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6437 {
6438 /* Write to read only memory? */
6439 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6440 && !(fFlags & X86_PTE_RW)
6441 && ( pIemCpu->uCpl != 0
6442 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6443 {
6444 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6445 *pGCPhysMem = NIL_RTGCPHYS;
6446 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6447 }
6448
6449 /* Kernel memory accessed by userland? */
6450 if ( !(fFlags & X86_PTE_US)
6451 && pIemCpu->uCpl == 3
6452 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6453 {
6454 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6455 *pGCPhysMem = NIL_RTGCPHYS;
6456 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6457 }
6458
6459 /* Executing non-executable memory? */
6460 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6461 && (fFlags & X86_PTE_PAE_NX)
6462 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6463 {
6464 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6465 *pGCPhysMem = NIL_RTGCPHYS;
6466 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6467 VERR_ACCESS_DENIED);
6468 }
6469 }
6470
6471 /*
6472 * Set the dirty / access flags.
6473 * ASSUMES this is set when the address is translated rather than on commit...
6474 */
6475 /** @todo testcase: check when A and D bits are actually set by the CPU. */
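 /* Writes need both the accessed (A) and dirty (D) bits, reads and
 instruction fetches only the A bit; the PTE is only touched when one
 of the required bits is still clear. */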
6476 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6477 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6478 {
6479 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6480 AssertRC(rc2);
6481 }
6482
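 /* Combine the page frame address with the offset into the page to form the final physical address. */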
6483 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6484 *pGCPhysMem = GCPhys;
6485 return VINF_SUCCESS;
6486}
6487
6488
6489
6490/**
6491 * Maps a physical page.
6492 *
6493 * @returns VBox status code (see PGMPhysIemGCPhys2Ptr).
6494 * @param pIemCpu The IEM per CPU data.
6495 * @param GCPhysMem The physical address.
6496 * @param fAccess The intended access.
6497 * @param ppvMem Where to return the mapping address.
6498 * @param pLock The PGM lock.
6499 */
6500IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6501{
6502#ifdef IEM_VERIFICATION_MODE_FULL
6503 /* Force the alternative path so we can ignore writes. */
6504 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6505 {
6506 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6507 {
6508 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6509 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6510 if (RT_FAILURE(rc2))
6511 pIemCpu->fProblematicMemory = true;
6512 }
6513 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6514 }
6515#endif
6516#ifdef IEM_LOG_MEMORY_WRITES
6517 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6518 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6519#endif
6520#ifdef IEM_VERIFICATION_MODE_MINIMAL
6521 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6522#endif
6523
6524 /** @todo This API may require some improving later. A private deal with PGM
6525 * regarding locking and unlocking needs to be struck. A couple of TLBs
6526 * living in PGM, but with publicly accessible inlined access methods
6527 * could perhaps be an even better solution. */
6528 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6529 GCPhysMem,
6530 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6531 pIemCpu->fBypassHandlers,
6532 ppvMem,
6533 pLock);
6534 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6535 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6536
6537#ifdef IEM_VERIFICATION_MODE_FULL
6538 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6539 pIemCpu->fProblematicMemory = true;
6540#endif
6541 return rc;
6542}
6543
6544
6545/**
6546 * Unmap a page previously mapped by iemMemPageMap.
6547 *
6548 * @param pIemCpu The IEM per CPU data.
6549 * @param GCPhysMem The physical address.
6550 * @param fAccess The intended access.
6551 * @param pvMem What iemMemPageMap returned.
6552 * @param pLock The PGM lock.
6553 */
6554DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6555{
6556 NOREF(pIemCpu);
6557 NOREF(GCPhysMem);
6558 NOREF(fAccess);
6559 NOREF(pvMem);
6560 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6561}
6562
6563
6564/**
6565 * Looks up a memory mapping entry.
6566 *
6567 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6568 * @param pIemCpu The IEM per CPU data.
6569 * @param pvMem The memory address.
6570 * @param fAccess The access type and purpose of the mapping to look up (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
6571 */
6572DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6573{
6574 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
6575 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6576 if ( pIemCpu->aMemMappings[0].pv == pvMem
6577 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6578 return 0;
6579 if ( pIemCpu->aMemMappings[1].pv == pvMem
6580 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6581 return 1;
6582 if ( pIemCpu->aMemMappings[2].pv == pvMem
6583 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6584 return 2;
6585 return VERR_NOT_FOUND;
6586}
6587
6588
6589/**
6590 * Finds a free memmap entry when using iNextMapping doesn't work.
6591 *
6592 * @returns Memory mapping index, 1024 on failure.
6593 * @param pIemCpu The IEM per CPU data.
6594 */
6595IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6596{
6597 /*
6598 * The easy case.
6599 */
6600 if (pIemCpu->cActiveMappings == 0)
6601 {
6602 pIemCpu->iNextMapping = 1;
6603 return 0;
6604 }
6605
6606 /* There should be enough mappings for all instructions. */
6607 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6608
6609 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6610 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6611 return i;
6612
6613 AssertFailedReturn(1024);
6614}
6615
6616
6617/**
6618 * Commits a bounce buffer that needs writing back and unmaps it.
6619 *
6620 * @returns Strict VBox status code.
6621 * @param pIemCpu The IEM per CPU data.
6622 * @param iMemMap The index of the buffer to commit.
6623 * @param fPostponeFail Whether we can postpone write failures to ring-3.
6624 * Always false in ring-3, obviously.
6625 */
6626IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap, bool fPostponeFail)
6627{
6628 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6629 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6630#ifdef IN_RING3
6631 Assert(!fPostponeFail);
6632#endif
6633
6634 /*
6635 * Do the writing.
6636 */
6637#ifndef IEM_VERIFICATION_MODE_MINIMAL
6638 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6639 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6640 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6641 {
6642 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6643 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6644 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6645 if (!pIemCpu->fBypassHandlers)
6646 {
6647 /*
6648 * Carefully and efficiently dealing with access handler return
6649 * codes makes this a little bloated.
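 * Four outcomes are handled for each of the two writes: plain success,
 * informational success statuses (merged via iemSetPassUpStatus), failures
 * that may be postponed to ring-3 when fPostponeFail is set (non-ring-3
 * builds only), and hard failures which are returned directly.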
6650 */
6651 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6652 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6653 pbBuf,
6654 cbFirst,
6655 PGMACCESSORIGIN_IEM);
6656 if (rcStrict == VINF_SUCCESS)
6657 {
6658 if (cbSecond)
6659 {
6660 rcStrict = PGMPhysWrite(pVM,
6661 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6662 pbBuf + cbFirst,
6663 cbSecond,
6664 PGMACCESSORIGIN_IEM);
6665 if (rcStrict == VINF_SUCCESS)
6666 { /* nothing */ }
6667 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6668 {
6669 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6670 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6671 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6672 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6673 }
6674# ifndef IN_RING3
6675 else if (fPostponeFail)
6676 {
6677 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6678 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6679 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6680 pIemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6681 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
6682 return iemSetPassUpStatus(pIemCpu, rcStrict);
6683 }
6684# endif
6685 else
6686 {
6687 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6688 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6689 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6690 return rcStrict;
6691 }
6692 }
6693 }
6694 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6695 {
6696 if (!cbSecond)
6697 {
6698 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6699 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6700 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6701 }
6702 else
6703 {
6704 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6705 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6706 pbBuf + cbFirst,
6707 cbSecond,
6708 PGMACCESSORIGIN_IEM);
6709 if (rcStrict2 == VINF_SUCCESS)
6710 {
6711 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6712 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6713 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6714 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6715 }
6716 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6717 {
6718 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6719 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6720 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6721 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6722 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6723 }
6724# ifndef IN_RING3
6725 else if (fPostponeFail)
6726 {
6727 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6728 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6729 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6730 pIemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6731 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
6732 return iemSetPassUpStatus(pIemCpu, rcStrict);
6733 }
6734# endif
6735 else
6736 {
6737 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6738 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6739 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6740 return rcStrict2;
6741 }
6742 }
6743 }
6744# ifndef IN_RING3
6745 else if (fPostponeFail)
6746 {
6747 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6748 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6749 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6750 if (!cbSecond)
6751 pIemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6752 else
6753 pIemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6754 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
6755 return iemSetPassUpStatus(pIemCpu, rcStrict);
6756 }
6757# endif
6758 else
6759 {
6760 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6761 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6762 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6763 return rcStrict;
6764 }
6765 }
6766 else
6767 {
6768 /*
6769 * No access handlers, much simpler.
6770 */
6771 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6772 if (RT_SUCCESS(rc))
6773 {
6774 if (cbSecond)
6775 {
6776 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6777 if (RT_SUCCESS(rc))
6778 { /* likely */ }
6779 else
6780 {
6781 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6782 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6783 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6784 return rc;
6785 }
6786 }
6787 }
6788 else
6789 {
6790 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6791 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6792 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6793 return rc;
6794 }
6795 }
6796 }
6797#endif
6798
6799#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6800 /*
6801 * Record the write(s).
6802 */
6803 if (!pIemCpu->fNoRem)
6804 {
6805 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6806 if (pEvtRec)
6807 {
6808 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6809 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6810 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6811 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6812 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6813 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6814 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6815 }
6816 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6817 {
6818 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6819 if (pEvtRec)
6820 {
6821 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6822 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6823 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6824 memcpy(pEvtRec->u.RamWrite.ab,
6825 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6826 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6827 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6828 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6829 }
6830 }
6831 }
6832#endif
6833#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6834 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6835 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6836 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6837 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6838 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6839 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6840
6841 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6842 g_cbIemWrote = cbWrote;
6843 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6844#endif
6845
6846 /*
6847 * Free the mapping entry.
6848 */
6849 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6850 Assert(pIemCpu->cActiveMappings != 0);
6851 pIemCpu->cActiveMappings--;
6852 return VINF_SUCCESS;
6853}
6854
6855
6856/**
6857 * iemMemMap worker that deals with a request crossing pages.
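 *
 * The access is split at the page boundary: both pages are translated and
 * checked, the current contents are read into the bounce buffer for read,
 * execute and partial write accesses, and the mapping entry is marked
 * IEM_ACCESS_BOUNCE_BUFFERED so the commit path writes the data back.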
6858 */
6859IEM_STATIC VBOXSTRICTRC
6860iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6861{
6862 /*
6863 * Do the address translations.
6864 */
6865 RTGCPHYS GCPhysFirst;
6866 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6867 if (rcStrict != VINF_SUCCESS)
6868 return rcStrict;
6869
6870 RTGCPHYS GCPhysSecond;
6871 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
6872 fAccess, &GCPhysSecond);
6873 if (rcStrict != VINF_SUCCESS)
6874 return rcStrict;
6875 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6876
6877 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6878#ifdef IEM_VERIFICATION_MODE_FULL
6879 /*
6880 * Detect problematic memory when verifying so we can select
6881 * the right execution engine. (TLB: Redo this.)
6882 */
6883 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6884 {
6885 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6886 if (RT_SUCCESS(rc2))
6887 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6888 if (RT_FAILURE(rc2))
6889 pIemCpu->fProblematicMemory = true;
6890 }
6891#endif
6892
6893
6894 /*
6895 * Read in the current memory content if it's a read, execute or partial
6896 * write access.
6897 */
6898 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6899 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6900 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6901
6902 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6903 {
6904 if (!pIemCpu->fBypassHandlers)
6905 {
6906 /*
6907 * Must carefully deal with access handler status codes here,
6908 * makes the code a bit bloated.
6909 */
6910 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6911 if (rcStrict == VINF_SUCCESS)
6912 {
6913 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6914 if (rcStrict == VINF_SUCCESS)
6915 { /*likely */ }
6916 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6917 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6918 else
6919 {
6920 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6921 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6922 return rcStrict;
6923 }
6924 }
6925 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6926 {
6927 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6928 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6929 {
6930 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6931 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6932 }
6933 else
6934 {
6935 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6936 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6937 return rcStrict2;
6938 }
6939 }
6940 else
6941 {
6942 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6943 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6944 return rcStrict;
6945 }
6946 }
6947 else
6948 {
6949 /*
6950 * No informational status codes here, much more straightforward.
6951 */
6952 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6953 if (RT_SUCCESS(rc))
6954 {
6955 Assert(rc == VINF_SUCCESS);
6956 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6957 if (RT_SUCCESS(rc))
6958 Assert(rc == VINF_SUCCESS);
6959 else
6960 {
6961 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6962 return rc;
6963 }
6964 }
6965 else
6966 {
6967 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6968 return rc;
6969 }
6970 }
6971
6972#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6973 if ( !pIemCpu->fNoRem
6974 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6975 {
6976 /*
6977 * Record the reads.
6978 */
6979 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6980 if (pEvtRec)
6981 {
6982 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6983 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6984 pEvtRec->u.RamRead.cb = cbFirstPage;
6985 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6986 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6987 }
6988 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6989 if (pEvtRec)
6990 {
6991 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6992 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6993 pEvtRec->u.RamRead.cb = cbSecondPage;
6994 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6995 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6996 }
6997 }
6998#endif
6999 }
7000#ifdef VBOX_STRICT
7001 else
7002 memset(pbBuf, 0xcc, cbMem);
7003 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
7004 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
7005#endif
7006
7007 /*
7008 * Commit the bounce buffer entry.
7009 */
7010 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
7011 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
7012 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
7013 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
7014 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
7015 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
7016 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
7017 pIemCpu->iNextMapping = iMemMap + 1;
7018 pIemCpu->cActiveMappings++;
7019
7020 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7021 *ppvMem = pbBuf;
7022 return VINF_SUCCESS;
7023}
7024
7025
7026/**
7027 * iemMemMap worker that deals with iemMemPageMap failures.
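 *
 * This bounce buffers accesses to unassigned memory or to pages covered by
 * access handlers (VERR_PGM_PHYS_TLB_UNASSIGNED / _CATCH_WRITE / _CATCH_ALL),
 * reading the current content where needed and deferring any actual write to
 * iemMemBounceBufferCommitAndUnmap.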
7028 */
7029IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
7030 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
7031{
7032 /*
7033 * Filter out conditions we can handle and the ones which shouldn't happen.
7034 */
7035 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
7036 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
7037 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
7038 {
7039 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
7040 return rcMap;
7041 }
7042 pIemCpu->cPotentialExits++;
7043
7044 /*
7045 * Read in the current memory content if it's a read, execute or partial
7046 * write access.
7047 */
7048 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
7049 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
7050 {
7051 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
7052 memset(pbBuf, 0xff, cbMem);
7053 else
7054 {
7055 int rc;
7056 if (!pIemCpu->fBypassHandlers)
7057 {
7058 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
7059 if (rcStrict == VINF_SUCCESS)
7060 { /* nothing */ }
7061 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7062 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
7063 else
7064 {
7065 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
7066 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7067 return rcStrict;
7068 }
7069 }
7070 else
7071 {
7072 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
7073 if (RT_SUCCESS(rc))
7074 { /* likely */ }
7075 else
7076 {
7077 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
7078 GCPhysFirst, rc));
7079 return rc;
7080 }
7081 }
7082 }
7083
7084#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7085 if ( !pIemCpu->fNoRem
7086 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
7087 {
7088 /*
7089 * Record the read.
7090 */
7091 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7092 if (pEvtRec)
7093 {
7094 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7095 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
7096 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
7097 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7098 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7099 }
7100 }
7101#endif
7102 }
7103#ifdef VBOX_STRICT
7104 else
7105 memset(pbBuf, 0xcc, cbMem);
7108 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
7109 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
7110#endif
7111
7112 /*
7113 * Commit the bounce buffer entry.
7114 */
7115 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
7116 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
7117 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
7118 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
7119 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
7120 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
7121 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
7122 pIemCpu->iNextMapping = iMemMap + 1;
7123 pIemCpu->cActiveMappings++;
7124
7125 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7126 *ppvMem = pbBuf;
7127 return VINF_SUCCESS;
7128}
7129
7130
7131
7132/**
7133 * Maps the specified guest memory for the given kind of access.
7134 *
7135 * This may be using bounce buffering of the memory if it's crossing a page
7136 * boundary or if there is an access handler installed for any of it. Because
7137 * of lock prefix guarantees, we're in for some extra clutter when this
7138 * happens.
7139 *
7140 * This may raise a \#GP, \#SS, \#PF or \#AC.
7141 *
7142 * @returns VBox strict status code.
7143 *
7144 * @param pIemCpu The IEM per CPU data.
7145 * @param ppvMem Where to return the pointer to the mapped
7146 * memory.
7147 * @param cbMem The number of bytes to map. This is usually 1,
7148 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7149 * string operations it can be up to a page.
7150 * @param iSegReg The index of the segment register to use for
7151 * this access. The base and limits are checked.
7152 * Use UINT8_MAX to indicate that no segmentation
7153 * is required (for IDT, GDT and LDT accesses).
7154 * @param GCPtrMem The address of the guest memory.
7155 * @param fAccess How the memory is being accessed. The
7156 * IEM_ACCESS_TYPE_XXX bit is used to figure out
7157 * how to map the memory, while the
7158 * IEM_ACCESS_WHAT_XXX bit is used when raising
7159 * exceptions.
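 *
 * A typical caller follows a map / access / commit-and-unmap pattern; the
 * sketch below simply mirrors the data store helpers further down in this
 * file:
 * @code
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                  iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 * @endcode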
7160 */
7161IEM_STATIC VBOXSTRICTRC
7162iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
7163{
7164 /*
7165 * Check the input and figure out which mapping entry to use.
7166 */
7167 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7168 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
7169 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
7170
7171 unsigned iMemMap = pIemCpu->iNextMapping;
7172 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
7173 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7174 {
7175 iMemMap = iemMemMapFindFree(pIemCpu);
7176 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
7177 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
7178 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
7179 pIemCpu->aMemMappings[2].fAccess),
7180 VERR_IEM_IPE_9);
7181 }
7182
7183 /*
7184 * Map the memory, checking that we can actually access it. If something
7185 * slightly complicated happens, fall back on bounce buffering.
7186 */
7187 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7188 if (rcStrict != VINF_SUCCESS)
7189 return rcStrict;
7190
7191 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
7192 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
7193
7194 RTGCPHYS GCPhysFirst;
7195 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
7196 if (rcStrict != VINF_SUCCESS)
7197 return rcStrict;
7198
7199 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7200 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7201 if (fAccess & IEM_ACCESS_TYPE_READ)
7202 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7203
7204 void *pvMem;
7205 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7206 if (rcStrict != VINF_SUCCESS)
7207 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7208
7209 /*
7210 * Fill in the mapping table entry.
7211 */
7212 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7213 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7214 pIemCpu->iNextMapping = iMemMap + 1;
7215 pIemCpu->cActiveMappings++;
7216
7217 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7218 *ppvMem = pvMem;
7219 return VINF_SUCCESS;
7220}
7221
7222
7223/**
7224 * Commits the guest memory if bounce buffered and unmaps it.
7225 *
7226 * @returns Strict VBox status code.
7227 * @param pIemCpu The IEM per CPU data.
7228 * @param pvMem The mapping.
7229 * @param fAccess The kind of access.
7230 */
7231IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7232{
7233 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7234 AssertReturn(iMemMap >= 0, iMemMap);
7235
7236 /* If it's bounce buffered, we may need to write back the buffer. */
7237 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7238 {
7239 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7240 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap, false /*fPostponeFail*/);
7241 }
7242 /* Otherwise unlock it. */
7243 else
7244 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7245
7246 /* Free the entry. */
7247 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7248 Assert(pIemCpu->cActiveMappings != 0);
7249 pIemCpu->cActiveMappings--;
7250 return VINF_SUCCESS;
7251}
7252
7253
7254#ifndef IN_RING3
7255/**
7256 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7257 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
7258 *
7259 * Allows the instruction to be completed and retired, while the IEM user will
7260 * return to ring-3 immediately afterwards and do the postponed writes there.
7261 *
7262 * @returns VBox status code (no strict statuses). Caller must check
7263 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7264 * @param pIemCpu The IEM per CPU data.
7265 * @param pvMem The mapping.
7266 * @param fAccess The kind of access.
7267 */
7268IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7269{
7270 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7271 AssertReturn(iMemMap >= 0, iMemMap);
7272
7273 /* If it's bounce buffered, we may need to write back the buffer. */
7274 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7275 {
7276 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7277 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap, true /*fPostponeFail*/);
7278 }
7279 /* Otherwise unlock it. */
7280 else
7281 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7282
7283 /* Free the entry. */
7284 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7285 Assert(pIemCpu->cActiveMappings != 0);
7286 pIemCpu->cActiveMappings--;
7287 return VINF_SUCCESS;
7288}
7289#endif
7290
7291
7292/**
7293 * Rolls back mappings, releasing page locks and such.
7294 *
7295 * The caller shall only call this after checking cActiveMappings.
7296 *
7298 * @param pIemCpu The IEM per CPU data.
7299 */
7300IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7301{
7302 Assert(pIemCpu->cActiveMappings > 0);
7303
7304 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7305 while (iMemMap-- > 0)
7306 {
7307 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7308 if (fAccess != IEM_ACCESS_INVALID)
7309 {
7310 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7311 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7312 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7313 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7314 Assert(pIemCpu->cActiveMappings > 0);
7315 pIemCpu->cActiveMappings--;
7316 }
7317 }
7318}
7319
7320
7321/**
7322 * Fetches a data byte.
7323 *
7324 * @returns Strict VBox status code.
7325 * @param pIemCpu The IEM per CPU data.
7326 * @param pu8Dst Where to return the byte.
7327 * @param iSegReg The index of the segment register to use for
7328 * this access. The base and limits are checked.
7329 * @param GCPtrMem The address of the guest memory.
7330 */
7331IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7332{
7333 /* The lazy approach for now... */
7334 uint8_t const *pu8Src;
7335 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7336 if (rc == VINF_SUCCESS)
7337 {
7338 *pu8Dst = *pu8Src;
7339 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7340 }
7341 return rc;
7342}
7343
7344
7345/**
7346 * Fetches a data word.
7347 *
7348 * @returns Strict VBox status code.
7349 * @param pIemCpu The IEM per CPU data.
7350 * @param pu16Dst Where to return the word.
7351 * @param iSegReg The index of the segment register to use for
7352 * this access. The base and limits are checked.
7353 * @param GCPtrMem The address of the guest memory.
7354 */
7355IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7356{
7357 /* The lazy approach for now... */
7358 uint16_t const *pu16Src;
7359 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7360 if (rc == VINF_SUCCESS)
7361 {
7362 *pu16Dst = *pu16Src;
7363 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7364 }
7365 return rc;
7366}
7367
7368
7369/**
7370 * Fetches a data dword.
7371 *
7372 * @returns Strict VBox status code.
7373 * @param pIemCpu The IEM per CPU data.
7374 * @param pu32Dst Where to return the dword.
7375 * @param iSegReg The index of the segment register to use for
7376 * this access. The base and limits are checked.
7377 * @param GCPtrMem The address of the guest memory.
7378 */
7379IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7380{
7381 /* The lazy approach for now... */
7382 uint32_t const *pu32Src;
7383 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7384 if (rc == VINF_SUCCESS)
7385 {
7386 *pu32Dst = *pu32Src;
7387 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7388 }
7389 return rc;
7390}
7391
7392
7393#ifdef SOME_UNUSED_FUNCTION
7394/**
7395 * Fetches a data dword and sign extends it to a qword.
7396 *
7397 * @returns Strict VBox status code.
7398 * @param pIemCpu The IEM per CPU data.
7399 * @param pu64Dst Where to return the sign extended value.
7400 * @param iSegReg The index of the segment register to use for
7401 * this access. The base and limits are checked.
7402 * @param GCPtrMem The address of the guest memory.
7403 */
7404IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7405{
7406 /* The lazy approach for now... */
7407 int32_t const *pi32Src;
7408 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7409 if (rc == VINF_SUCCESS)
7410 {
7411 *pu64Dst = *pi32Src;
7412 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7413 }
7414#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7415 else
7416 *pu64Dst = 0;
7417#endif
7418 return rc;
7419}
7420#endif
7421
7422
7423/**
7424 * Fetches a data qword.
7425 *
7426 * @returns Strict VBox status code.
7427 * @param pIemCpu The IEM per CPU data.
7428 * @param pu64Dst Where to return the qword.
7429 * @param iSegReg The index of the segment register to use for
7430 * this access. The base and limits are checked.
7431 * @param GCPtrMem The address of the guest memory.
7432 */
7433IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7434{
7435 /* The lazy approach for now... */
7436 uint64_t const *pu64Src;
7437 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7438 if (rc == VINF_SUCCESS)
7439 {
7440 *pu64Dst = *pu64Src;
7441 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7442 }
7443 return rc;
7444}
7445
7446
7447/**
7448 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7449 *
7450 * @returns Strict VBox status code.
7451 * @param pIemCpu The IEM per CPU data.
7452 * @param pu64Dst Where to return the qword.
7453 * @param iSegReg The index of the segment register to use for
7454 * this access. The base and limits are checked.
7455 * @param GCPtrMem The address of the guest memory.
7456 */
7457IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7458{
7459 /* The lazy approach for now... */
7460 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7461 if (RT_UNLIKELY(GCPtrMem & 15))
7462 return iemRaiseGeneralProtectionFault0(pIemCpu);
7463
7464 uint64_t const *pu64Src;
7465 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7466 if (rc == VINF_SUCCESS)
7467 {
7468 *pu64Dst = *pu64Src;
7469 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7470 }
7471 return rc;
7472}
7473
7474
7475/**
7476 * Fetches a data tword.
7477 *
7478 * @returns Strict VBox status code.
7479 * @param pIemCpu The IEM per CPU data.
7480 * @param pr80Dst Where to return the tword.
7481 * @param iSegReg The index of the segment register to use for
7482 * this access. The base and limits are checked.
7483 * @param GCPtrMem The address of the guest memory.
7484 */
7485IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7486{
7487 /* The lazy approach for now... */
7488 PCRTFLOAT80U pr80Src;
7489 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7490 if (rc == VINF_SUCCESS)
7491 {
7492 *pr80Dst = *pr80Src;
7493 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7494 }
7495 return rc;
7496}
7497
7498
7499/**
7500 * Fetches a data dqword (double qword), generally SSE related.
7501 *
7502 * @returns Strict VBox status code.
7503 * @param pIemCpu The IEM per CPU data.
7504 * @param pu128Dst Where to return the qword.
7505 * @param iSegReg The index of the segment register to use for
7506 * this access. The base and limits are checked.
7507 * @param GCPtrMem The address of the guest memory.
7508 */
7509IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7510{
7511 /* The lazy approach for now... */
7512 uint128_t const *pu128Src;
7513 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7514 if (rc == VINF_SUCCESS)
7515 {
7516 *pu128Dst = *pu128Src;
7517 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7518 }
7519 return rc;
7520}
7521
7522
7523/**
7524 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7525 * related.
7526 *
7527 * Raises \#GP(0) if not aligned.
7528 *
7529 * @returns Strict VBox status code.
7530 * @param pIemCpu The IEM per CPU data.
7531 * @param pu128Dst Where to return the qword.
7532 * @param iSegReg The index of the segment register to use for
7533 * this access. The base and limits are checked.
7534 * @param GCPtrMem The address of the guest memory.
7535 */
7536IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7537{
7538 /* The lazy approach for now... */
7539 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7540 if ( (GCPtrMem & 15)
7541 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7542 return iemRaiseGeneralProtectionFault0(pIemCpu);
7543
7544 uint128_t const *pu128Src;
7545 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7546 if (rc == VINF_SUCCESS)
7547 {
7548 *pu128Dst = *pu128Src;
7549 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7550 }
7551 return rc;
7552}
7553
7554
7555
7556
7557/**
7558 * Fetches a descriptor register (lgdt, lidt).
7559 *
7560 * @returns Strict VBox status code.
7561 * @param pIemCpu The IEM per CPU data.
7562 * @param pcbLimit Where to return the limit.
7563 * @param pGCPtrBase Where to return the base.
7564 * @param iSegReg The index of the segment register to use for
7565 * this access. The base and limits are checked.
7566 * @param GCPtrMem The address of the guest memory.
7567 * @param enmOpSize The effective operand size.
7568 */
7569IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7570 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7571{
7572 /*
7573 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7574 * little special:
7575 * - The two reads are done separately.
7576 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
7577 * - We suspect the 386 to actually commit the limit before the base in
7578 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7579 * don't try to emulate this eccentric behavior, because it's not well
7580 * enough understood and rather hard to trigger.
7581 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7582 */
7583 VBOXSTRICTRC rcStrict;
7584 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
7585 {
7586 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7587 if (rcStrict == VINF_SUCCESS)
7588 rcStrict = iemMemFetchDataU64(pIemCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7589 }
7590 else
7591 {
7592 uint32_t uTmp;
7593 if (enmOpSize == IEMMODE_32BIT)
7594 {
7595 if (IEM_GET_TARGET_CPU(pIemCpu) != IEMTARGETCPU_486)
7596 {
7597 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7598 if (rcStrict == VINF_SUCCESS)
7599 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7600 }
7601 else
7602 {
7603 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem);
7604 if (rcStrict == VINF_SUCCESS)
7605 {
7606 *pcbLimit = (uint16_t)uTmp;
7607 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7608 }
7609 }
7610 if (rcStrict == VINF_SUCCESS)
7611 *pGCPtrBase = uTmp;
7612 }
7613 else
7614 {
7615 rcStrict = iemMemFetchDataU16(pIemCpu, pcbLimit, iSegReg, GCPtrMem);
7616 if (rcStrict == VINF_SUCCESS)
7617 {
7618 rcStrict = iemMemFetchDataU32(pIemCpu, &uTmp, iSegReg, GCPtrMem + 2);
7619 if (rcStrict == VINF_SUCCESS)
7620 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7621 }
7622 }
7623 }
7624 return rcStrict;
7625}
7626
7627
7628
7629/**
7630 * Stores a data byte.
7631 *
7632 * @returns Strict VBox status code.
7633 * @param pIemCpu The IEM per CPU data.
7634 * @param iSegReg The index of the segment register to use for
7635 * this access. The base and limits are checked.
7636 * @param GCPtrMem The address of the guest memory.
7637 * @param u8Value The value to store.
7638 */
7639IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7640{
7641 /* The lazy approach for now... */
7642 uint8_t *pu8Dst;
7643 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7644 if (rc == VINF_SUCCESS)
7645 {
7646 *pu8Dst = u8Value;
7647 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7648 }
7649 return rc;
7650}
7651
7652
7653/**
7654 * Stores a data word.
7655 *
7656 * @returns Strict VBox status code.
7657 * @param pIemCpu The IEM per CPU data.
7658 * @param iSegReg The index of the segment register to use for
7659 * this access. The base and limits are checked.
7660 * @param GCPtrMem The address of the guest memory.
7661 * @param u16Value The value to store.
7662 */
7663IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7664{
7665 /* The lazy approach for now... */
7666 uint16_t *pu16Dst;
7667 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7668 if (rc == VINF_SUCCESS)
7669 {
7670 *pu16Dst = u16Value;
7671 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7672 }
7673 return rc;
7674}
7675
7676
7677/**
7678 * Stores a data dword.
7679 *
7680 * @returns Strict VBox status code.
7681 * @param pIemCpu The IEM per CPU data.
7682 * @param iSegReg The index of the segment register to use for
7683 * this access. The base and limits are checked.
7684 * @param GCPtrMem The address of the guest memory.
7685 * @param u32Value The value to store.
7686 */
7687IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7688{
7689 /* The lazy approach for now... */
7690 uint32_t *pu32Dst;
7691 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7692 if (rc == VINF_SUCCESS)
7693 {
7694 *pu32Dst = u32Value;
7695 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7696 }
7697 return rc;
7698}
7699
7700
7701/**
7702 * Stores a data qword.
7703 *
7704 * @returns Strict VBox status code.
7705 * @param pIemCpu The IEM per CPU data.
7706 * @param iSegReg The index of the segment register to use for
7707 * this access. The base and limits are checked.
7708 * @param GCPtrMem The address of the guest memory.
7709 * @param u64Value The value to store.
7710 */
7711IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7712{
7713 /* The lazy approach for now... */
7714 uint64_t *pu64Dst;
7715 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7716 if (rc == VINF_SUCCESS)
7717 {
7718 *pu64Dst = u64Value;
7719 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7720 }
7721 return rc;
7722}
7723
7724
7725/**
7726 * Stores a data dqword.
7727 *
7728 * @returns Strict VBox status code.
7729 * @param pIemCpu The IEM per CPU data.
7730 * @param iSegReg The index of the segment register to use for
7731 * this access. The base and limits are checked.
7732 * @param GCPtrMem The address of the guest memory.
7733 * @param u128Value The value to store.
7734 */
7735IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7736{
7737 /* The lazy approach for now... */
7738 uint128_t *pu128Dst;
7739 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7740 if (rc == VINF_SUCCESS)
7741 {
7742 *pu128Dst = u128Value;
7743 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7744 }
7745 return rc;
7746}
7747
7748
7749/**
7750 * Stores a data dqword, SSE aligned.
7751 *
7752 * @returns Strict VBox status code.
7753 * @param pIemCpu The IEM per CPU data.
7754 * @param iSegReg The index of the segment register to use for
7755 * this access. The base and limits are checked.
7756 * @param GCPtrMem The address of the guest memory.
7757 * @param u128Value The value to store.
7758 */
7759IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7760{
7761 /* The lazy approach for now... */
7762 if ( (GCPtrMem & 15)
7763 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7764 return iemRaiseGeneralProtectionFault0(pIemCpu);
7765
7766 uint128_t *pu128Dst;
7767 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7768 if (rc == VINF_SUCCESS)
7769 {
7770 *pu128Dst = u128Value;
7771 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7772 }
7773 return rc;
7774}
7775
7776
7777/**
7778 * Stores a descriptor register (sgdt, sidt).
7779 *
7780 * @returns Strict VBox status code.
7781 * @param pIemCpu The IEM per CPU data.
7782 * @param cbLimit The limit.
7783 * @param GCPtrBase The base address.
7784 * @param iSegReg The index of the segment register to use for
7785 * this access. The base and limits are checked.
7786 * @param GCPtrMem The address of the guest memory.
7787 */
7788IEM_STATIC VBOXSTRICTRC
7789iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
7790{
7791 /*
7792 * The SIDT and SGDT instructions actually store the data using two
7793 * independent writes. The instructions do not respond to opsize prefixes.
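 *
 * When the CPU mode is 16-bit, targets up to and including the 286 get 0xFF
 * stored in the fourth base byte (see the IEMTARGETCPU_286 check below),
 * while later targets store the base value as-is.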
7794 */
7795 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pIemCpu, iSegReg, GCPtrMem, cbLimit);
7796 if (rcStrict == VINF_SUCCESS)
7797 {
7798 if (pIemCpu->enmCpuMode == IEMMODE_16BIT)
7799 rcStrict = iemMemStoreDataU32(pIemCpu, iSegReg, GCPtrMem + 2,
7800 IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_286
7801 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7802 else if (pIemCpu->enmCpuMode == IEMMODE_32BIT)
7803 rcStrict = iemMemStoreDataU32(pIemCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7804 else
7805 rcStrict = iemMemStoreDataU64(pIemCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7806 }
7807 return rcStrict;
7808}
7809
7810
7811/**
7812 * Pushes a word onto the stack.
7813 *
7814 * @returns Strict VBox status code.
7815 * @param pIemCpu The IEM per CPU data.
7816 * @param u16Value The value to push.
7817 */
7818IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7819{
7820 /* Decrement the stack pointer. */
7821 uint64_t uNewRsp;
7822 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7823 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7824
7825 /* Write the word the lazy way. */
7826 uint16_t *pu16Dst;
7827 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7828 if (rc == VINF_SUCCESS)
7829 {
7830 *pu16Dst = u16Value;
7831 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7832 }
7833
7834 /* Commit the new RSP value unless an access handler made trouble. */
7835 if (rc == VINF_SUCCESS)
7836 pCtx->rsp = uNewRsp;
7837
7838 return rc;
7839}
7840
7841
7842/**
7843 * Pushes a dword onto the stack.
7844 *
7845 * @returns Strict VBox status code.
7846 * @param pIemCpu The IEM per CPU data.
7847 * @param u32Value The value to push.
7848 */
7849IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7850{
7851 /* Decrement the stack pointer. */
7852 uint64_t uNewRsp;
7853 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7854 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7855
7856 /* Write the dword the lazy way. */
7857 uint32_t *pu32Dst;
7858 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7859 if (rc == VINF_SUCCESS)
7860 {
7861 *pu32Dst = u32Value;
7862 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7863 }
7864
7865 /* Commit the new RSP value unless an access handler made trouble. */
7866 if (rc == VINF_SUCCESS)
7867 pCtx->rsp = uNewRsp;
7868
7869 return rc;
7870}
7871
7872
7873/**
7874 * Pushes a dword segment register value onto the stack.
7875 *
7876 * @returns Strict VBox status code.
7877 * @param pIemCpu The IEM per CPU data.
7878 * @param u32Value The value to push.
7879 */
7880IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7881{
7882 /* Decrement the stack pointer. */
7883 uint64_t uNewRsp;
7884 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7885 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7886
7887 VBOXSTRICTRC rc;
7888 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7889 {
7890 /* The recompiler writes a full dword. */
7891 uint32_t *pu32Dst;
7892 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7893 if (rc == VINF_SUCCESS)
7894 {
7895 *pu32Dst = u32Value;
7896 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7897 }
7898 }
7899 else
7900 {
7901 /* The Intel docs talk about zero extending the selector register
7902 value. My actual Intel CPU here might be zero extending the value,
7903 but it still only writes the lower word... */
7904 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7905 * happens when crossing a page boundary: is the high word checked
7906 * for write accessibility or not? Probably it is. What about segment limits?
7907 * It appears this behavior is also shared with trap error codes.
7908 *
7909 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
7910 * ancient hardware when it actually did change. */
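 /* Map a full dword so the access and limit checks cover the entire stack
 slot, but only store the low word, leaving the high word untouched. */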
7911 uint16_t *pu16Dst;
7912 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7913 if (rc == VINF_SUCCESS)
7914 {
7915 *pu16Dst = (uint16_t)u32Value;
7916 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7917 }
7918 }
7919
7920 /* Commit the new RSP value unless an access handler made trouble. */
7921 if (rc == VINF_SUCCESS)
7922 pCtx->rsp = uNewRsp;
7923
7924 return rc;
7925}
7926
7927
7928/**
7929 * Pushes a qword onto the stack.
7930 *
7931 * @returns Strict VBox status code.
7932 * @param pIemCpu The IEM per CPU data.
7933 * @param u64Value The value to push.
7934 */
7935IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7936{
7937 /* Decrement the stack pointer. */
7938 uint64_t uNewRsp;
7939 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7940 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7941
7942 /* Write the qword the lazy way. */
7943 uint64_t *pu64Dst;
7944 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7945 if (rc == VINF_SUCCESS)
7946 {
7947 *pu64Dst = u64Value;
7948 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7949 }
7950
7951 /* Commit the new RSP value unless an access handler made trouble. */
7952 if (rc == VINF_SUCCESS)
7953 pCtx->rsp = uNewRsp;
7954
7955 return rc;
7956}
7957
7958
7959/**
7960 * Pops a word from the stack.
7961 *
7962 * @returns Strict VBox status code.
7963 * @param pIemCpu The IEM per CPU data.
7964 * @param pu16Value Where to store the popped value.
7965 */
7966IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7967{
7968 /* Increment the stack pointer. */
7969 uint64_t uNewRsp;
7970 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7971 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7972
7973 /* Read the word the lazy way. */
7974 uint16_t const *pu16Src;
7975 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7976 if (rc == VINF_SUCCESS)
7977 {
7978 *pu16Value = *pu16Src;
7979 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7980
7981 /* Commit the new RSP value. */
7982 if (rc == VINF_SUCCESS)
7983 pCtx->rsp = uNewRsp;
7984 }
7985
7986 return rc;
7987}
7988
7989
7990/**
7991 * Pops a dword from the stack.
7992 *
7993 * @returns Strict VBox status code.
7994 * @param pIemCpu The IEM per CPU data.
7995 * @param pu32Value Where to store the popped value.
7996 */
7997IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7998{
7999 /* Increment the stack pointer. */
8000 uint64_t uNewRsp;
8001 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8002 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
8003
8004 /* Read the dword the lazy way. */
8005 uint32_t const *pu32Src;
8006 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8007 if (rc == VINF_SUCCESS)
8008 {
8009 *pu32Value = *pu32Src;
8010 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8011
8012 /* Commit the new RSP value. */
8013 if (rc == VINF_SUCCESS)
8014 pCtx->rsp = uNewRsp;
8015 }
8016
8017 return rc;
8018}
8019
8020
8021/**
8022 * Pops a qword from the stack.
8023 *
8024 * @returns Strict VBox status code.
8025 * @param pIemCpu The IEM per CPU data.
8026 * @param pu64Value Where to store the popped value.
8027 */
8028IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
8029{
8030 /* Increment the stack pointer. */
8031 uint64_t uNewRsp;
8032 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8033 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
8034
8035 /* Read the qword the lazy way. */
8036 uint64_t const *pu64Src;
8037 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8038 if (rc == VINF_SUCCESS)
8039 {
8040 *pu64Value = *pu64Src;
8041 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8042
8043 /* Commit the new RSP value. */
8044 if (rc == VINF_SUCCESS)
8045 pCtx->rsp = uNewRsp;
8046 }
8047
8048 return rc;
8049}
8050
8051
8052/**
8053 * Pushes a word onto the stack, using a temporary stack pointer.
8054 *
8055 * @returns Strict VBox status code.
8056 * @param pIemCpu The IEM per CPU data.
8057 * @param u16Value The value to push.
8058 * @param pTmpRsp Pointer to the temporary stack pointer.
8059 */
8060IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
8061{
8062 /* Decrement the stack pointer. */
8063 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8064 RTUINT64U NewRsp = *pTmpRsp;
8065 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
8066
8067 /* Write the word the lazy way. */
8068 uint16_t *pu16Dst;
8069 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8070 if (rc == VINF_SUCCESS)
8071 {
8072 *pu16Dst = u16Value;
8073 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
8074 }
8075
8076 /* Commit the new RSP value unless an access handler made trouble. */
8077 if (rc == VINF_SUCCESS)
8078 *pTmpRsp = NewRsp;
8079
8080 return rc;
8081}
8082
8083
8084/**
8085 * Pushes a dword onto the stack, using a temporary stack pointer.
8086 *
8087 * @returns Strict VBox status code.
8088 * @param pIemCpu The IEM per CPU data.
8089 * @param u32Value The value to push.
8090 * @param pTmpRsp Pointer to the temporary stack pointer.
8091 */
8092IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
8093{
8094 /* Decrement the stack pointer. */
8095 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8096 RTUINT64U NewRsp = *pTmpRsp;
8097 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
8098
8099 /* Write the dword the lazy way. */
8100 uint32_t *pu32Dst;
8101 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8102 if (rc == VINF_SUCCESS)
8103 {
8104 *pu32Dst = u32Value;
8105 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
8106 }
8107
8108 /* Commit the new RSP value unless an access handler made trouble. */
8109 if (rc == VINF_SUCCESS)
8110 *pTmpRsp = NewRsp;
8111
8112 return rc;
8113}
8114
8115
8116/**
8117 * Pushes a qword onto the stack, using a temporary stack pointer.
8118 *
8119 * @returns Strict VBox status code.
8120 * @param pIemCpu The IEM per CPU data.
8121 * @param u64Value The value to push.
8122 * @param pTmpRsp Pointer to the temporary stack pointer.
8123 */
8124IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
8125{
8126 /* Decrement the stack pointer. */
8127 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8128 RTUINT64U NewRsp = *pTmpRsp;
8129 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
8130
8131 /* Write the qword the lazy way. */
8132 uint64_t *pu64Dst;
8133 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8134 if (rc == VINF_SUCCESS)
8135 {
8136 *pu64Dst = u64Value;
8137 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
8138 }
8139
8140 /* Commit the new RSP value unless an access handler made trouble. */
8141 if (rc == VINF_SUCCESS)
8142 *pTmpRsp = NewRsp;
8143
8144 return rc;
8145}
8146
8147
8148/**
8149 * Pops a word from the stack, using a temporary stack pointer.
8150 *
8151 * @returns Strict VBox status code.
8152 * @param pIemCpu The IEM per CPU data.
8153 * @param pu16Value Where to store the popped value.
8154 * @param pTmpRsp Pointer to the temporary stack pointer.
8155 */
8156IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
8157{
8158 /* Increment the stack pointer. */
8159 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8160 RTUINT64U NewRsp = *pTmpRsp;
8161 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
8162
8163 /* Read the word the lazy way. */
8164 uint16_t const *pu16Src;
8165 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8166 if (rc == VINF_SUCCESS)
8167 {
8168 *pu16Value = *pu16Src;
8169 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8170
8171 /* Commit the new RSP value. */
8172 if (rc == VINF_SUCCESS)
8173 *pTmpRsp = NewRsp;
8174 }
8175
8176 return rc;
8177}
8178
8179
8180/**
8181 * Pops a dword from the stack, using a temporary stack pointer.
8182 *
8183 * @returns Strict VBox status code.
8184 * @param pIemCpu The IEM per CPU data.
8185 * @param pu32Value Where to store the popped value.
8186 * @param pTmpRsp Pointer to the temporary stack pointer.
8187 */
8188IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
8189{
8190 /* Increment the stack pointer. */
8191 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8192 RTUINT64U NewRsp = *pTmpRsp;
8193 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
8194
8195 /* Read the dword the lazy way. */
8196 uint32_t const *pu32Src;
8197 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8198 if (rc == VINF_SUCCESS)
8199 {
8200 *pu32Value = *pu32Src;
8201 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8202
8203 /* Commit the new RSP value. */
8204 if (rc == VINF_SUCCESS)
8205 *pTmpRsp = NewRsp;
8206 }
8207
8208 return rc;
8209}
8210
8211
8212/**
8213 * Pops a qword from the stack, using a temporary stack pointer.
8214 *
8215 * @returns Strict VBox status code.
8216 * @param pIemCpu The IEM per CPU data.
8217 * @param pu64Value Where to store the popped value.
8218 * @param pTmpRsp Pointer to the temporary stack pointer.
8219 */
8220IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
8221{
8222 /* Increment the stack pointer. */
8223 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8224 RTUINT64U NewRsp = *pTmpRsp;
8225 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8226
8227 /* Read the qword the lazy way. */
8228 uint64_t const *pu64Src;
8229 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8230 if (rcStrict == VINF_SUCCESS)
8231 {
8232 *pu64Value = *pu64Src;
8233 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8234
8235 /* Commit the new RSP value. */
8236 if (rcStrict == VINF_SUCCESS)
8237 *pTmpRsp = NewRsp;
8238 }
8239
8240 return rcStrict;
8241}
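#if 0 /* Illustrative sketch only (hypothetical helper): the *Ex variants work on a
         caller-owned RSP copy, so a multi-value pop is rolled back simply by not
         committing the temporary value if any step fails. */
static VBOXSTRICTRC iemExamplePopTwoWords(PIEMCPU pIemCpu, uint16_t *pu16First, uint16_t *pu16Second)
{
    PCPUMCTX  pCtx = pIemCpu->CTX_SUFF(pCtx);
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pIemCpu, pu16First, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pIemCpu, pu16Second, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u; /* Only commit RSP once both pops have succeeded. */
    return rcStrict;
}
#endif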
8242
8243
8244/**
8245 * Begin a special stack push (used by interrupts, exceptions and such).
8246 *
8247 * This will raise \#SS or \#PF if appropriate.
8248 *
8249 * @returns Strict VBox status code.
8250 * @param pIemCpu The IEM per CPU data.
8251 * @param cbMem The number of bytes to push onto the stack.
8252 * @param ppvMem Where to return the pointer to the stack memory.
8253 * As with the other memory functions this could be
8254 * direct access or bounce buffered access, so
8255 * don't commit the register state until the
8256 * commit call succeeds.
8257 * @param puNewRsp Where to return the new RSP value. This must be
8258 * passed unchanged to
8259 * iemMemStackPushCommitSpecial().
8260 */
8261IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8262{
8263 Assert(cbMem < UINT8_MAX);
8264 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8265 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8266 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8267}
8268
8269
8270/**
8271 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8272 *
8273 * This will update the rSP.
8274 *
8275 * @returns Strict VBox status code.
8276 * @param pIemCpu The IEM per CPU data.
8277 * @param pvMem The pointer returned by
8278 * iemMemStackPushBeginSpecial().
8279 * @param uNewRsp The new RSP value returned by
8280 * iemMemStackPushBeginSpecial().
8281 */
8282IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8283{
8284 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8285 if (rcStrict == VINF_SUCCESS)
8286 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8287 return rcStrict;
8288}
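#if 0 /* Illustrative sketch only (modelled on, but not copied from, real mode exception
         dispatch): the begin/commit pair maps the whole frame in one go so nothing is
         committed if any part of it faults. pCtx and cbInstr are assumed to be in scope. */
    uint16_t    *pau16Frame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6 /* IP, CS, FLAGS */,
                                                        (void **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau16Frame[2] = (uint16_t)pCtx->eflags.u;
    pau16Frame[1] = pCtx->cs.Sel;
    pau16Frame[0] = (uint16_t)(pCtx->rip + cbInstr);
    rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pau16Frame, uNewRsp);
#endif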
8289
8290
8291/**
8292 * Begin a special stack pop (used by iret, retf and such).
8293 *
8294 * This will raise \#SS or \#PF if appropriate.
8295 *
8296 * @returns Strict VBox status code.
8297 * @param pIemCpu The IEM per CPU data.
8298 * @param cbMem The number of bytes to pop off the stack.
8299 * @param ppvMem Where to return the pointer to the stack memory.
8300 * @param puNewRsp Where to return the new RSP value. This must be
8301 * passed unchanged to
8302 * iemMemStackPopCommitSpecial() or applied
8303 * manually if iemMemStackPopDoneSpecial() is used.
8304 */
8305IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8306{
8307 Assert(cbMem < UINT8_MAX);
8308 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8309 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8310 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8311}
8312
8313
8314/**
8315 * Continue a special stack pop (used by iret and retf).
8316 *
8317 * This will raise \#SS or \#PF if appropriate.
8318 *
8319 * @returns Strict VBox status code.
8320 * @param pIemCpu The IEM per CPU data.
8321 * @param cbMem The number of bytes to pop off the stack.
8322 * @param ppvMem Where to return the pointer to the stack memory.
8323 * @param puNewRsp Where to return the new RSP value. This must be
8324 * passed unchanged to
8325 * iemMemStackPopCommitSpecial() or applied
8326 * manually if iemMemStackPopDoneSpecial() is used.
8327 */
8328IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8329{
8330 Assert(cbMem < UINT8_MAX);
8331 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8332 RTUINT64U NewRsp;
8333 NewRsp.u = *puNewRsp;
8334 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8335 *puNewRsp = NewRsp.u;
8336 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8337}
8338
8339
8340/**
8341 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8342 *
8343 * This will update the rSP.
8344 *
8345 * @returns Strict VBox status code.
8346 * @param pIemCpu The IEM per CPU data.
8347 * @param pvMem The pointer returned by
8348 * iemMemStackPopBeginSpecial().
8349 * @param uNewRsp The new RSP value returned by
8350 * iemMemStackPopBeginSpecial().
8351 */
8352IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8353{
8354 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8355 if (rcStrict == VINF_SUCCESS)
8356 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8357 return rcStrict;
8358}
8359
8360
8361/**
8362 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8363 * iemMemStackPopContinueSpecial).
8364 *
8365 * The caller will manually commit the rSP.
8366 *
8367 * @returns Strict VBox status code.
8368 * @param pIemCpu The IEM per CPU data.
8369 * @param pvMem The pointer returned by
8370 * iemMemStackPopBeginSpecial() or
8371 * iemMemStackPopContinueSpecial().
8372 */
8373IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8374{
8375 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8376}
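#if 0 /* Illustrative sketch only: when the popped values must be validated before the
         stack pointer may change (iret/retf style), the Done variant is used and RSP
         is committed by hand afterwards. pCtx is assumed to be in scope. */
    uint16_t const *pau16Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 4 /* IP + CS */,
                                                       (void const **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint16_t const uNewIp = pau16Frame[0];
    uint16_t const uNewCs = pau16Frame[1];
    rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pau16Frame);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... validate and load uNewCs, set rIP to uNewIp, then commit the stack pointer: */
    pCtx->rsp = uNewRsp;
#endif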
8377
8378
8379/**
8380 * Fetches a system table byte.
8381 *
8382 * @returns Strict VBox status code.
8383 * @param pIemCpu The IEM per CPU data.
8384 * @param pbDst Where to return the byte.
8385 * @param iSegReg The index of the segment register to use for
8386 * this access. The base and limits are checked.
8387 * @param GCPtrMem The address of the guest memory.
8388 */
8389IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8390{
8391 /* The lazy approach for now... */
8392 uint8_t const *pbSrc;
8393 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8394 if (rc == VINF_SUCCESS)
8395 {
8396 *pbDst = *pbSrc;
8397 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8398 }
8399 return rc;
8400}
8401
8402
8403/**
8404 * Fetches a system table word.
8405 *
8406 * @returns Strict VBox status code.
8407 * @param pIemCpu The IEM per CPU data.
8408 * @param pu16Dst Where to return the word.
8409 * @param iSegReg The index of the segment register to use for
8410 * this access. The base and limits are checked.
8411 * @param GCPtrMem The address of the guest memory.
8412 */
8413IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8414{
8415 /* The lazy approach for now... */
8416 uint16_t const *pu16Src;
8417 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8418 if (rc == VINF_SUCCESS)
8419 {
8420 *pu16Dst = *pu16Src;
8421 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8422 }
8423 return rc;
8424}
8425
8426
8427/**
8428 * Fetches a system table dword.
8429 *
8430 * @returns Strict VBox status code.
8431 * @param pIemCpu The IEM per CPU data.
8432 * @param pu32Dst Where to return the dword.
8433 * @param iSegReg The index of the segment register to use for
8434 * this access. The base and limits are checked.
8435 * @param GCPtrMem The address of the guest memory.
8436 */
8437IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8438{
8439 /* The lazy approach for now... */
8440 uint32_t const *pu32Src;
8441 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8442 if (rc == VINF_SUCCESS)
8443 {
8444 *pu32Dst = *pu32Src;
8445 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8446 }
8447 return rc;
8448}
8449
8450
8451/**
8452 * Fetches a system table qword.
8453 *
8454 * @returns Strict VBox status code.
8455 * @param pIemCpu The IEM per CPU data.
8456 * @param pu64Dst Where to return the qword.
8457 * @param iSegReg The index of the segment register to use for
8458 * this access. The base and limits are checked.
8459 * @param GCPtrMem The address of the guest memory.
8460 */
8461IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8462{
8463 /* The lazy approach for now... */
8464 uint64_t const *pu64Src;
8465 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8466 if (rc == VINF_SUCCESS)
8467 {
8468 *pu64Dst = *pu64Src;
8469 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8470 }
8471 return rc;
8472}
8473
8474
8475/**
8476 * Fetches a descriptor table entry with caller specified error code.
8477 *
8478 * @returns Strict VBox status code.
8479 * @param pIemCpu The IEM per CPU.
8480 * @param pDesc Where to return the descriptor table entry.
8481 * @param uSel The selector which table entry to fetch.
8482 * @param uXcpt The exception to raise on table lookup error.
8483 * @param uErrorCode The error code associated with the exception.
8484 */
8485IEM_STATIC VBOXSTRICTRC
8486iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8487{
8488 AssertPtr(pDesc);
8489 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8490
8491 /** @todo did the 286 require all 8 bytes to be accessible? */
8492 /*
8493 * Get the selector table base and check bounds.
8494 */
8495 RTGCPTR GCPtrBase;
8496 if (uSel & X86_SEL_LDT)
8497 {
8498 if ( !pCtx->ldtr.Attr.n.u1Present
8499 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8500 {
8501 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8502 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8503 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8504 uErrorCode, 0);
8505 }
8506
8507 Assert(pCtx->ldtr.Attr.n.u1Present);
8508 GCPtrBase = pCtx->ldtr.u64Base;
8509 }
8510 else
8511 {
8512 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8513 {
8514 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8515 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8516 uErrorCode, 0);
8517 }
8518 GCPtrBase = pCtx->gdtr.pGdt;
8519 }
8520
8521 /*
8522 * Read the legacy descriptor and maybe the long mode extensions if
8523 * required.
8524 */
8525 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8526 if (rcStrict == VINF_SUCCESS)
8527 {
8528 if ( !IEM_IS_LONG_MODE(pIemCpu)
8529 || pDesc->Legacy.Gen.u1DescType)
8530 pDesc->Long.au64[1] = 0;
8531 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
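            /* Note: (uSel | X86_SEL_RPL_LDT) + 1 == (uSel & X86_SEL_MASK) + 8, i.e. the
               offset of the upper half of the 16-byte long mode descriptor; the bounds
               check above makes sure that upper half lies within the table limit. */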
8532 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8533 else
8534 {
8535 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8536 /** @todo is this the right exception? */
8537 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8538 }
8539 }
8540 return rcStrict;
8541}
8542
8543
8544/**
8545 * Fetches a descriptor table entry.
8546 *
8547 * @returns Strict VBox status code.
8548 * @param pIemCpu The IEM per CPU.
8549 * @param pDesc Where to return the descriptor table entry.
8550 * @param uSel The selector which table entry to fetch.
8551 * @param uXcpt The exception to raise on table lookup error.
8552 */
8553IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8554{
8555 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8556}
8557
8558
8559/**
8560 * Fakes a long mode stack selector for SS = 0.
8561 *
8562 * @param pDescSs Where to return the fake stack descriptor.
8563 * @param uDpl The DPL we want.
8564 */
8565IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8566{
8567 pDescSs->Long.au64[0] = 0;
8568 pDescSs->Long.au64[1] = 0;
8569 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8570 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8571 pDescSs->Long.Gen.u2Dpl = uDpl;
8572 pDescSs->Long.Gen.u1Present = 1;
8573 pDescSs->Long.Gen.u1Long = 1;
8574}
8575
8576
8577/**
8578 * Marks the selector descriptor as accessed (only non-system descriptors).
8579 *
8580 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8581 * will therefore skip the limit checks.
8582 *
8583 * @returns Strict VBox status code.
8584 * @param pIemCpu The IEM per CPU.
8585 * @param uSel The selector.
8586 */
8587IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8588{
8589 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8590
8591 /*
8592 * Get the selector table base and calculate the entry address.
8593 */
8594 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8595 ? pCtx->ldtr.u64Base
8596 : pCtx->gdtr.pGdt;
8597 GCPtr += uSel & X86_SEL_MASK;
8598
8599 /*
8600 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8601 * ugly stuff to avoid this. This also makes sure the access is atomic and
8602 * more or less removes any question about 8-bit vs 32-bit accesses.
8603 */
8604 VBOXSTRICTRC rcStrict;
8605 uint32_t volatile *pu32;
8606 if ((GCPtr & 3) == 0)
8607 {
8608 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8609 GCPtr += 2 + 2;
8610 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8611 if (rcStrict != VINF_SUCCESS)
8612 return rcStrict;
8613 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8614 }
8615 else
8616 {
8617 /* The misaligned GDT/LDT case, map the whole thing. */
8618 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8619 if (rcStrict != VINF_SUCCESS)
8620 return rcStrict;
8621 switch ((uintptr_t)pu32 & 3)
8622 {
8623 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8624 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8625 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8626 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8627 }
8628 }
8629
8630 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8631}
8632
8633/** @} */
8634
8635
8636/*
8637 * Include the C/C++ implementation of instruction.
8638 */
8639#include "IEMAllCImpl.cpp.h"
8640
8641
8642
8643/** @name "Microcode" macros.
8644 *
8645 * The idea is that we should be able to use the same code both to interpret
8646 * instructions and to feed a recompiler later. Thus this obfuscation.
8647 *
8648 * @{
8649 */
8650#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8651#define IEM_MC_END() }
8652#define IEM_MC_PAUSE() do {} while (0)
8653#define IEM_MC_CONTINUE() do {} while (0)
8654
8655/** Internal macro. */
8656#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8657 do \
8658 { \
8659 VBOXSTRICTRC rcStrict2 = a_Expr; \
8660 if (rcStrict2 != VINF_SUCCESS) \
8661 return rcStrict2; \
8662 } while (0)
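#if 0 /* Illustrative sketch only, not a real opcode function: this is roughly what a
         decoded "push ax" style instruction body looks like when written with the
         IEM_MC_* microcode macros (the ones used here are all defined further down
         in this section). */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint16_t, u16Value);
        IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
        IEM_MC_PUSH_U16(u16Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif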
8663
8664#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8665#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8666#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8667#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8668#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8669#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8670#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8671
8672#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8673#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8674 do { \
8675 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8676 return iemRaiseDeviceNotAvailable(pIemCpu); \
8677 } while (0)
8678#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8679 do { \
8680 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8681 return iemRaiseMathFault(pIemCpu); \
8682 } while (0)
8683#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8684 do { \
8685 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8686 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8687 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8688 return iemRaiseUndefinedOpcode(pIemCpu); \
8689 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8690 return iemRaiseDeviceNotAvailable(pIemCpu); \
8691 } while (0)
8692#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
8693 do { \
8694 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8695 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8696 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse) \
8697 return iemRaiseUndefinedOpcode(pIemCpu); \
8698 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8699 return iemRaiseDeviceNotAvailable(pIemCpu); \
8700 } while (0)
8701#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8702 do { \
8703 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8704 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8705 return iemRaiseUndefinedOpcode(pIemCpu); \
8706 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8707 return iemRaiseDeviceNotAvailable(pIemCpu); \
8708 } while (0)
8709#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8710 do { \
8711 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8712 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8713 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8714 return iemRaiseUndefinedOpcode(pIemCpu); \
8715 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8716 return iemRaiseDeviceNotAvailable(pIemCpu); \
8717 } while (0)
8718#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8719 do { \
8720 if (pIemCpu->uCpl != 0) \
8721 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8722 } while (0)
8723
8724
8725#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8726#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8727#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8728#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8729#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8730#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8731#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8732 uint32_t a_Name; \
8733 uint32_t *a_pName = &a_Name
8734#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8735 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8736
8737#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8738#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8739
8740#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8741#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8742#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8743#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8744#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8745#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8746#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8747#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8748#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8749#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8750#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8751#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8752#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8753#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8754#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8755#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8756#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8757#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8758#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8759#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8760#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8761#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8762#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8763#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8764#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8765#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8766#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8767#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8768#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8769/** @note Not for IOPL or IF testing or modification. */
8770#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8771#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8772#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8773#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8774
8775#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8776#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8777#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8778#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8779#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8780#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8781#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8782#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8783#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8784#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8785#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8786 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8787
8788#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8789#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8790/** @todo The user of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8791 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8792#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8793#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8794/** @note Not for IOPL or IF testing or modification. */
8795#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8796
8797#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8798#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8799#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8800 do { \
8801 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8802 *pu32Reg += (a_u32Value); \
8803 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8804 } while (0)
8805#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8806
8807#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8808#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8809#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8810 do { \
8811 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8812 *pu32Reg -= (a_u32Value); \
8813 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8814 } while (0)
8815#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8816#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
8817
8818#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8819#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8820#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8821#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8822#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8823#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8824#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8825
8826#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8827#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8828#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8829#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8830
8831#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8832#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8833#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8834
8835#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8836#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
8837#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8838
8839#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8840#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8841#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8842
8843#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8844#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8845#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8846
8847#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8848
8849#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8850
8851#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8852#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8853#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8854 do { \
8855 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8856 *pu32Reg &= (a_u32Value); \
8857 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8858 } while (0)
8859#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8860
8861#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8862#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8863#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8864 do { \
8865 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8866 *pu32Reg |= (a_u32Value); \
8867 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8868 } while (0)
8869#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8870
8871
8872/** @note Not for IOPL or IF modification. */
8873#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8874/** @note Not for IOPL or IF modification. */
8875#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8876/** @note Not for IOPL or IF modification. */
8877#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8878
8879#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8880
8881
8882#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8883 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8884#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8885 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8886#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8887 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8888#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8889 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8890#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8891 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8892#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8893 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8894#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8895 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8896
8897#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8898 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8899#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8900 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8901#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8902 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8903#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8904 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8905#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8906 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8907 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8908 } while (0)
8909#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8910 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8911 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8912 } while (0)
8913#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8914 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8915#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8916 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8917#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8918 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8919#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
8920 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
8921 = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
8922
8923#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8924 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8925#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8926 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8927#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8928 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8929
8930#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8931 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8932#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8933 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8934#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8935 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8936
8937#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8938 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8939#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8940 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8941#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8942 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8943
8944#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8945 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8946
8947#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8948 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8949#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8950 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8951#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8952 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8953#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8954 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8955
8956#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8957 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8958#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8959 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8960#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8961 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8962
8963#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8964 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8965#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8966 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8967
8968
8969
8970#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8971 do { \
8972 uint8_t u8Tmp; \
8973 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8974 (a_u16Dst) = u8Tmp; \
8975 } while (0)
8976#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8977 do { \
8978 uint8_t u8Tmp; \
8979 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8980 (a_u32Dst) = u8Tmp; \
8981 } while (0)
8982#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8983 do { \
8984 uint8_t u8Tmp; \
8985 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8986 (a_u64Dst) = u8Tmp; \
8987 } while (0)
8988#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8989 do { \
8990 uint16_t u16Tmp; \
8991 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8992 (a_u32Dst) = u16Tmp; \
8993 } while (0)
8994#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8995 do { \
8996 uint16_t u16Tmp; \
8997 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8998 (a_u64Dst) = u16Tmp; \
8999 } while (0)
9000#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
9001 do { \
9002 uint32_t u32Tmp; \
9003 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
9004 (a_u64Dst) = u32Tmp; \
9005 } while (0)
9006
9007#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
9008 do { \
9009 uint8_t u8Tmp; \
9010 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
9011 (a_u16Dst) = (int8_t)u8Tmp; \
9012 } while (0)
9013#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
9014 do { \
9015 uint8_t u8Tmp; \
9016 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
9017 (a_u32Dst) = (int8_t)u8Tmp; \
9018 } while (0)
9019#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
9020 do { \
9021 uint8_t u8Tmp; \
9022 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
9023 (a_u64Dst) = (int8_t)u8Tmp; \
9024 } while (0)
9025#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
9026 do { \
9027 uint16_t u16Tmp; \
9028 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
9029 (a_u32Dst) = (int16_t)u16Tmp; \
9030 } while (0)
9031#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
9032 do { \
9033 uint16_t u16Tmp; \
9034 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
9035 (a_u64Dst) = (int16_t)u16Tmp; \
9036 } while (0)
9037#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
9038 do { \
9039 uint32_t u32Tmp; \
9040 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
9041 (a_u64Dst) = (int32_t)u32Tmp; \
9042 } while (0)
9043
9044#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
9045 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
9046#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
9047 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
9048#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
9049 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
9050#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
9051 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
9052
9053#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
9054 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
9055#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
9056 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
9057#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
9058 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
9059#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
9060 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
9061
9062#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
9063#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
9064#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
9065#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
9066#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
9067#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
9068#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
9069 do { \
9070 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
9071 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
9072 } while (0)
9073
9074#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
9075 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
9076#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
9077 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
9078
9079
9080#define IEM_MC_PUSH_U16(a_u16Value) \
9081 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
9082#define IEM_MC_PUSH_U32(a_u32Value) \
9083 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
9084#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
9085 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
9086#define IEM_MC_PUSH_U64(a_u64Value) \
9087 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
9088
9089#define IEM_MC_POP_U16(a_pu16Value) \
9090 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
9091#define IEM_MC_POP_U32(a_pu32Value) \
9092 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
9093#define IEM_MC_POP_U64(a_pu64Value) \
9094 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
9095
9096/** Maps guest memory for direct or bounce buffered access.
9097 * The purpose is to pass it to an operand implementation, thus the a_iArg.
9098 * @remarks May return.
9099 */
9100#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
9101 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
9102
9103/** Maps guest memory for direct or bounce buffered access.
9104 * The purpose is to pass it to an operand implementation, thus the a_iArg.
9105 * @remarks May return.
9106 */
9107#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
9108 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
9109
9110/** Commits the memory and unmaps the guest memory.
9111 * @remarks May return.
9112 */
9113#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
9114 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
9115
9116/** Commits the memory and unmaps the guest memory unless the FPU status word
9117 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
9118 * would cause FLD not to store.
9119 *
9120 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
9121 * store, while \#P will not.
9122 *
9123 * @remarks May in theory return - for now.
9124 */
9125#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
9126 do { \
9127 if ( !(a_u16FSW & X86_FSW_ES) \
9128 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
9129 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
9130 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
9131 } while (0)
9132
9133/** Calculate efficient address from R/M. */
9134#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
9135 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
9136
9137#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
9138#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
9139#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
9140#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
9141#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
9142#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
9143#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
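#if 0 /* Illustrative sketch only, not a real opcode body: shows how a read-modify-write
         memory operand is mapped, handed to an assembly worker and then committed.
         iemAImpl_add_u16, bRm, iGRegSrc and pIemCpu->iEffSeg are assumed to come from
         the decoder context and may differ in the real code. */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint16_t *,  pu16Dst,          0);
        IEM_MC_ARG(uint16_t,    u16Src,           1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,  2);
        IEM_MC_LOCAL(RTGCPTR,   GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U16(u16Src, iGRegSrc);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif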
9144
9145/**
9146 * Defers the rest of the instruction emulation to a C implementation routine
9147 * and returns, only taking the standard parameters.
9148 *
9149 * @param a_pfnCImpl The pointer to the C routine.
9150 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
9151 */
9152#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
9153
9154/**
9155 * Defers the rest of instruction emulation to a C implementation routine and
9156 * returns, taking one argument in addition to the standard ones.
9157 *
9158 * @param a_pfnCImpl The pointer to the C routine.
9159 * @param a0 The argument.
9160 */
9161#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
9162
9163/**
9164 * Defers the rest of the instruction emulation to a C implementation routine
9165 * and returns, taking two arguments in addition to the standard ones.
9166 *
9167 * @param a_pfnCImpl The pointer to the C routine.
9168 * @param a0 The first extra argument.
9169 * @param a1 The second extra argument.
9170 */
9171#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
9172
9173/**
9174 * Defers the rest of the instruction emulation to a C implementation routine
9175 * and returns, taking three arguments in addition to the standard ones.
9176 *
9177 * @param a_pfnCImpl The pointer to the C routine.
9178 * @param a0 The first extra argument.
9179 * @param a1 The second extra argument.
9180 * @param a2 The third extra argument.
9181 */
9182#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9183
9184/**
9185 * Defers the rest of the instruction emulation to a C implementation routine
9186 * and returns, taking four arguments in addition to the standard ones.
9187 *
9188 * @param a_pfnCImpl The pointer to the C routine.
9189 * @param a0 The first extra argument.
9190 * @param a1 The second extra argument.
9191 * @param a2 The third extra argument.
9192 * @param a3 The fourth extra argument.
9193 */
9194#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
9195
9196/**
9197 * Defers the rest of the instruction emulation to a C implementation routine
9198 * and returns, taking five arguments in addition to the standard ones.
9199 *
9200 * @param a_pfnCImpl The pointer to the C routine.
9201 * @param a0 The first extra argument.
9202 * @param a1 The second extra argument.
9203 * @param a2 The third extra argument.
9204 * @param a3 The fourth extra argument.
9205 * @param a4 The fifth extra argument.
9206 */
9207#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
9208
9209/**
9210 * Defers the entire instruction emulation to a C implementation routine and
9211 * returns, only taking the standard parameters.
9212 *
9213 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9214 *
9215 * @param a_pfnCImpl The pointer to the C routine.
9216 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
9217 */
9218#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
9219
9220/**
9221 * Defers the entire instruction emulation to a C implementation routine and
9222 * returns, taking one argument in addition to the standard ones.
9223 *
9224 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9225 *
9226 * @param a_pfnCImpl The pointer to the C routine.
9227 * @param a0 The argument.
9228 */
9229#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
9230
9231/**
9232 * Defers the entire instruction emulation to a C implementation routine and
9233 * returns, taking two arguments in addition to the standard ones.
9234 *
9235 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9236 *
9237 * @param a_pfnCImpl The pointer to the C routine.
9238 * @param a0 The first extra argument.
9239 * @param a1 The second extra argument.
9240 */
9241#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
9242
9243/**
9244 * Defers the entire instruction emulation to a C implementation routine and
9245 * returns, taking three arguments in addition to the standard ones.
9246 *
9247 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9248 *
9249 * @param a_pfnCImpl The pointer to the C routine.
9250 * @param a0 The first extra argument.
9251 * @param a1 The second extra argument.
9252 * @param a2 The third extra argument.
9253 */
9254#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9255
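/*
 * Illustrative sketch: in contrast to the IEM_MC_CALL_CIMPL_* variants, the
 * IEM_MC_DEFER_TO_CIMPL_* macros form the entire decoder body on their own.
 * Assuming the FNIEMOP_DEF decoder-function macro from earlier in this file
 * and a hypothetical iemCImpl_ExampleWorker, a decoder deferring everything
 * to C might look like:
 *
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          IEMOP_MNEMONIC("example");
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ExampleWorker);
 *      }
 */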
9256/**
9257 * Calls a FPU assembly implementation taking one visible argument.
9258 *
9259 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9260 * @param a0 The first extra argument.
9261 */
9262#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
9263 do { \
9264 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9265 } while (0)
9266
9267/**
9268 * Calls a FPU assembly implementation taking two visible arguments.
9269 *
9270 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9271 * @param a0 The first extra argument.
9272 * @param a1 The second extra argument.
9273 */
9274#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9275 do { \
9276 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9277 } while (0)
9278
9279/**
9280 * Calls a FPU assembly implementation taking three visible arguments.
9281 *
9282 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9283 * @param a0 The first extra argument.
9284 * @param a1 The second extra argument.
9285 * @param a2 The third extra argument.
9286 */
9287#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9288 do { \
9289 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9290 } while (0)
9291
9292#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9293 do { \
9294 (a_FpuData).FSW = (a_FSW); \
9295 (a_FpuData).r80Result = *(a_pr80Value); \
9296 } while (0)
9297
9298/** Pushes FPU result onto the stack. */
9299#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9300 iemFpuPushResult(pIemCpu, &a_FpuData)
9301/** Pushes FPU result onto the stack and sets the FPUDP. */
9302#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9303 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9304
9305/** Replaces ST0 with the first result value and pushes the second onto the FPU stack. */
9306#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9307 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9308
9309/** Stores FPU result in a stack register. */
9310#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9311 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9312/** Stores FPU result in a stack register and pops the stack. */
9313#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9314 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9315/** Stores FPU result in a stack register and sets the FPUDP. */
9316#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9317 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9318/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9319 * stack. */
9320#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9321 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9322
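/*
 * Illustrative sketch of how the FPU call and result macros combine in a
 * two-operand instruction body, loosely patterned after the fadd-style
 * helpers. pfnAImpl, pFpuRes, pr80Value1/2 and iStReg are placeholders, and
 * IEM_MC_LOCAL / IEM_MC_ADVANCE_RIP are assumed from earlier in this file:
 *
 *      IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 */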
9323/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9324#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9325 iemFpuUpdateOpcodeAndIp(pIemCpu)
9326/** Free a stack register (for FFREE and FFREEP). */
9327#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9328 iemFpuStackFree(pIemCpu, a_iStReg)
9329/** Increment the FPU stack pointer. */
9330#define IEM_MC_FPU_STACK_INC_TOP() \
9331 iemFpuStackIncTop(pIemCpu)
9332/** Decrement the FPU stack pointer. */
9333#define IEM_MC_FPU_STACK_DEC_TOP() \
9334 iemFpuStackDecTop(pIemCpu)
9335
9336/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9337#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9338 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9339/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9340#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9341 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9342/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9343#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9344 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9345/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9346#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9347 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9348/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9349 * stack. */
9350#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9351 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9352/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9353#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9354 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9355
9356/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9357#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9358 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9359/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9360 * stack. */
9361#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9362 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9363/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9364 * FPUDS. */
9365#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9366 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9367/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9368 * FPUDS. Pops stack. */
9369#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9370 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9371/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9372 * stack twice. */
9373#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9374 iemFpuStackUnderflowThenPopPop(pIemCpu)
9375/** Raises a FPU stack underflow exception for an instruction pushing a result
9376 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9377#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9378 iemFpuStackPushUnderflow(pIemCpu)
9379/** Raises a FPU stack underflow exception for an instruction pushing a result
9380 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9381#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9382 iemFpuStackPushUnderflowTwo(pIemCpu)
9383
9384/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9385 * FPUIP, FPUCS and FOP. */
9386#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9387 iemFpuStackPushOverflow(pIemCpu)
9388/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9389 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9390#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9391 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9392/** Prepares for using the FPU state.
9393 * Ensures that we can use the host FPU in the current context (RC+R0).
9394 * Ensures the guest FPU state in the CPUMCTX is up to date. */
9395#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pIemCpu)
9396/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
9397#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pIemCpu)
9398/** Actualizes the guest FPU state so it can be accessed and modified. */
9399#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pIemCpu)
9400
9401/** Prepares for using the SSE state.
9402 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
9403 * Ensures the guest SSE state in the CPUMCTX is up to date. */
9404#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pIemCpu)
9405/** Actualizes the guest XMM0..15 register state for read-only access. */
9406#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pIemCpu)
9407/** Actualizes the guest XMM0..15 register state for read-write access. */
9408#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pIemCpu)
9409
9410/**
9411 * Calls a MMX assembly implementation taking two visible arguments.
9412 *
9413 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9414 * @param a0 The first extra argument.
9415 * @param a1 The second extra argument.
9416 */
9417#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9418 do { \
9419 IEM_MC_PREPARE_FPU_USAGE(); \
9420 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9421 } while (0)
9422
9423/**
9424 * Calls a MMX assembly implementation taking three visible arguments.
9425 *
9426 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9427 * @param a0 The first extra argument.
9428 * @param a1 The second extra argument.
9429 * @param a2 The third extra argument.
9430 */
9431#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9432 do { \
9433 IEM_MC_PREPARE_FPU_USAGE(); \
9434 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9435 } while (0)
9436
9437
9438/**
9439 * Calls a SSE assembly implementation taking two visible arguments.
9440 *
9441 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9442 * @param a0 The first extra argument.
9443 * @param a1 The second extra argument.
9444 */
9445#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9446 do { \
9447 IEM_MC_PREPARE_SSE_USAGE(); \
9448 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9449 } while (0)
9450
9451/**
9452 * Calls a SSE assembly implementation taking three visible arguments.
9453 *
9454 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9455 * @param a0 The first extra argument.
9456 * @param a1 The second extra argument.
9457 * @param a2 The third extra argument.
9458 */
9459#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9460 do { \
9461 IEM_MC_PREPARE_SSE_USAGE(); \
9462 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9463 } while (0)
9464
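/*
 * Illustrative sketch of an SSE register-to-register instruction body using
 * the call macro above. The IEM_MC_ARG and IEM_MC_REF_XREG_U128* macros are
 * assumed from earlier in this file; pfnAImpl and the register indexes are
 * placeholders. Note that IEM_MC_CALL_SSE_AIMPL_2 prepares the SSE state
 * itself (see its definition above):
 *
 *      IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      IEM_MC_REF_XREG_U128(pDst, iXRegDst);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, iXRegSrc);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnAImpl, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 */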
9465/** @note Not for IOPL or IF testing. */
9466#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9467/** @note Not for IOPL or IF testing. */
9468#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9469/** @note Not for IOPL or IF testing. */
9470#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9471/** @note Not for IOPL or IF testing. */
9472#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9473/** @note Not for IOPL or IF testing. */
9474#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9475 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9476 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9477/** @note Not for IOPL or IF testing. */
9478#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9479 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9480 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9481/** @note Not for IOPL or IF testing. */
9482#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9483 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9484 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9485 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9486/** @note Not for IOPL or IF testing. */
9487#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9488 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9489 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9490 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9491#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9492#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9493#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9494/** @note Not for IOPL or IF testing. */
9495#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9496 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9497 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9498/** @note Not for IOPL or IF testing. */
9499#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9500 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9501 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9502/** @note Not for IOPL or IF testing. */
9503#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9504 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9505 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9506/** @note Not for IOPL or IF testing. */
9507#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9508 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9509 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9510/** @note Not for IOPL or IF testing. */
9511#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9512 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9513 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9514/** @note Not for IOPL or IF testing. */
9515#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9516 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9517 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9518#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9519#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9520
9521#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9522 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9523#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9524 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9525#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9526 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9527#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9528 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9529#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9530 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9531#define IEM_MC_IF_FCW_IM() \
9532 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9533
9534#define IEM_MC_ELSE() } else {
9535#define IEM_MC_ENDIF() } do {} while (0)
9536
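/*
 * Illustrative sketch of the conditional microcode macros in a Jcc-style
 * decoder body. IEM_MC_REL_JMP_S8 and IEM_MC_ADVANCE_RIP are assumed from
 * earlier in this file and i8Imm is a placeholder for the decoded
 * displacement:
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 *      IEM_MC_END();
 */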
9537/** @} */
9538
9539
9540/** @name Opcode Debug Helpers.
9541 * @{
9542 */
9543#ifdef DEBUG
9544# define IEMOP_MNEMONIC(a_szMnemonic) \
9545 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9546 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9547# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9548 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9549 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9550#else
9551# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9552# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9553#endif
9554
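/*
 * Illustrative sketch: the mnemonic macros are meant to be invoked at the top
 * of a decoder function so the Log4 level shows "decode - cs:rip mnemonic"
 * lines. Assuming the FNIEMOP_DEF macro from earlier in this file, usage
 * might look like:
 *
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          IEMOP_MNEMONIC2("example", "Ev,Gv");
 *          ...
 *      }
 */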
9555/** @} */
9556
9557
9558/** @name Opcode Helpers.
9559 * @{
9560 */
9561
9562#ifdef IN_RING3
9563# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9564 do { \
9565 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9566 else \
9567 { \
9568 DBGFSTOP(IEMCPU_TO_VM(pIemCpu)); \
9569 return IEMOP_RAISE_INVALID_OPCODE(); \
9570 } \
9571 } while (0)
9572#else
9573# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9574 do { \
9575 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9576 else return IEMOP_RAISE_INVALID_OPCODE(); \
9577 } while (0)
9578#endif
9579
9580/** The instruction requires a 186 or later. */
9581#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
9582# define IEMOP_HLP_MIN_186() do { } while (0)
9583#else
9584# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
9585#endif
9586
9587/** The instruction requires a 286 or later. */
9588#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
9589# define IEMOP_HLP_MIN_286() do { } while (0)
9590#else
9591# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
9592#endif
9593
9594/** The instruction requires a 386 or later. */
9595#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9596# define IEMOP_HLP_MIN_386() do { } while (0)
9597#else
9598# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
9599#endif
9600
9601/** The instruction requires a 386 or later if the given expression is true. */
9602#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9603# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
9604#else
9605# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
9606#endif
9607
9608/** The instruction requires a 486 or later. */
9609#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
9610# define IEMOP_HLP_MIN_486() do { } while (0)
9611#else
9612# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
9613#endif
9614
9615/** The instruction requires a Pentium (586) or later. */
9616#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
9617# define IEMOP_HLP_MIN_586() do { } while (0)
9618#else
9619# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
9620#endif
9621
9622/** The instruction requires a PentiumPro (686) or later. */
9623#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
9624# define IEMOP_HLP_MIN_686() do { } while (0)
9625#else
9626# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
9627#endif
9628
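/*
 * Illustrative sketch of the minimum CPU helpers: a decoder for an
 * instruction introduced with the 286 might start out along these lines
 * (FNIEMOP_DEF assumed from earlier in this file), with IEMOP_HLP_MIN_286
 * raising #UD when the configured target CPU is older:
 *
 *      FNIEMOP_DEF(iemOp_example_286)
 *      {
 *          IEMOP_MNEMONIC("example");
 *          IEMOP_HLP_MIN_286();
 *          ...
 *      }
 */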
9629
9630/** The instruction raises an \#UD in real and V8086 mode. */
9631#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9632 do \
9633 { \
9634 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9635 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9636 } while (0)
9637
9638/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9639 * lock prefixed.
9640 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9641#define IEMOP_HLP_NO_LOCK_PREFIX() \
9642 do \
9643 { \
9644 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9645 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9646 } while (0)
9647
9648/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9649 * 64-bit mode. */
9650#define IEMOP_HLP_NO_64BIT() \
9651 do \
9652 { \
9653 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9654 return IEMOP_RAISE_INVALID_OPCODE(); \
9655 } while (0)
9656
9657/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9658 * 64-bit mode. */
9659#define IEMOP_HLP_ONLY_64BIT() \
9660 do \
9661 { \
9662 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9663 return IEMOP_RAISE_INVALID_OPCODE(); \
9664 } while (0)
9665
9666/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9667#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9668 do \
9669 { \
9670 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9671 iemRecalEffOpSize64Default(pIemCpu); \
9672 } while (0)
9673
9674/** The instruction has 64-bit operand size if 64-bit mode. */
9675#define IEMOP_HLP_64BIT_OP_SIZE() \
9676 do \
9677 { \
9678 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9679 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9680 } while (0)
9681
9682/** Only a REX prefix immediately preceding the first opcode byte takes
9683 * effect. This macro helps ensure this and logs bad guest code. */
9684#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9685 do \
9686 { \
9687 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9688 { \
9689 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9690 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9691 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9692 pIemCpu->uRexB = 0; \
9693 pIemCpu->uRexIndex = 0; \
9694 pIemCpu->uRexReg = 0; \
9695 iemRecalEffOpSize(pIemCpu); \
9696 } \
9697 } while (0)
9698
9699/**
9700 * Done decoding.
9701 */
9702#define IEMOP_HLP_DONE_DECODING() \
9703 do \
9704 { \
9705 /*nothing for now, maybe later... */ \
9706 } while (0)
9707
9708/**
9709 * Done decoding, raise \#UD exception if lock prefix present.
9710 */
9711#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9712 do \
9713 { \
9714 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9715 { /* likely */ } \
9716 else \
9717 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9718 } while (0)
9719#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9720 do \
9721 { \
9722 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9723 { /* likely */ } \
9724 else \
9725 { \
9726 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9727 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9728 } \
9729 } while (0)
9730#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9731 do \
9732 { \
9733 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9734 { /* likely */ } \
9735 else \
9736 { \
9737 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9738 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9739 } \
9740 } while (0)
9741/**
9742 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9743 * are present.
9744 */
9745#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9746 do \
9747 { \
9748 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9749 { /* likely */ } \
9750 else \
9751 return IEMOP_RAISE_INVALID_OPCODE(); \
9752 } while (0)
9753
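/*
 * Illustrative sketch of the done-decoding helpers in a ModR/M decoder.
 * IEM_OPCODE_GET_NEXT_U8 is the same fetch macro used by
 * iemOpHlpCalcRmEffAddr below; the register/memory split shown here is a
 * placeholder outline, not a real instruction:
 *
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
 *          {
 *              ... register operand form ...
 *          }
 *          ... memory operand form, using IEM_MC_CALC_RM_EFF_ADDR ...
 *      }
 */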
9754
9755/**
9756 * Calculates the effective address of a ModR/M memory operand.
9757 *
9758 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9759 *
9760 * @return Strict VBox status code.
9761 * @param pIemCpu The IEM per CPU data.
9762 * @param bRm The ModRM byte.
9763 * @param cbImm The size of any immediate following the
9764 * effective address opcode bytes. Important for
9765 * RIP relative addressing.
9766 * @param pGCPtrEff Where to return the effective address.
9767 */
9768IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9769{
9770 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9771 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9772#define SET_SS_DEF() \
9773 do \
9774 { \
9775 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9776 pIemCpu->iEffSeg = X86_SREG_SS; \
9777 } while (0)
9778
9779 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9780 {
9781/** @todo Check the effective address size crap! */
9782 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9783 {
9784 uint16_t u16EffAddr;
9785
9786 /* Handle the disp16 form with no registers first. */
9787 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9788 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9789 else
9790 {
9791 /* Get the displacement. */
9792 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9793 {
9794 case 0: u16EffAddr = 0; break;
9795 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9796 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9797 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9798 }
9799
9800 /* Add the base and index registers to the disp. */
9801 switch (bRm & X86_MODRM_RM_MASK)
9802 {
9803 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9804 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9805 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9806 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9807 case 4: u16EffAddr += pCtx->si; break;
9808 case 5: u16EffAddr += pCtx->di; break;
9809 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9810 case 7: u16EffAddr += pCtx->bx; break;
9811 }
9812 }
9813
9814 *pGCPtrEff = u16EffAddr;
9815 }
9816 else
9817 {
9818 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9819 uint32_t u32EffAddr;
9820
9821 /* Handle the disp32 form with no registers first. */
9822 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9823 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9824 else
9825 {
9826 /* Get the register (or SIB) value. */
9827 switch ((bRm & X86_MODRM_RM_MASK))
9828 {
9829 case 0: u32EffAddr = pCtx->eax; break;
9830 case 1: u32EffAddr = pCtx->ecx; break;
9831 case 2: u32EffAddr = pCtx->edx; break;
9832 case 3: u32EffAddr = pCtx->ebx; break;
9833 case 4: /* SIB */
9834 {
9835 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9836
9837 /* Get the index and scale it. */
9838 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9839 {
9840 case 0: u32EffAddr = pCtx->eax; break;
9841 case 1: u32EffAddr = pCtx->ecx; break;
9842 case 2: u32EffAddr = pCtx->edx; break;
9843 case 3: u32EffAddr = pCtx->ebx; break;
9844 case 4: u32EffAddr = 0; /*none */ break;
9845 case 5: u32EffAddr = pCtx->ebp; break;
9846 case 6: u32EffAddr = pCtx->esi; break;
9847 case 7: u32EffAddr = pCtx->edi; break;
9848 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9849 }
9850 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9851
9852 /* add base */
9853 switch (bSib & X86_SIB_BASE_MASK)
9854 {
9855 case 0: u32EffAddr += pCtx->eax; break;
9856 case 1: u32EffAddr += pCtx->ecx; break;
9857 case 2: u32EffAddr += pCtx->edx; break;
9858 case 3: u32EffAddr += pCtx->ebx; break;
9859 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9860 case 5:
9861 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9862 {
9863 u32EffAddr += pCtx->ebp;
9864 SET_SS_DEF();
9865 }
9866 else
9867 {
9868 uint32_t u32Disp;
9869 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9870 u32EffAddr += u32Disp;
9871 }
9872 break;
9873 case 6: u32EffAddr += pCtx->esi; break;
9874 case 7: u32EffAddr += pCtx->edi; break;
9875 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9876 }
9877 break;
9878 }
9879 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9880 case 6: u32EffAddr = pCtx->esi; break;
9881 case 7: u32EffAddr = pCtx->edi; break;
9882 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9883 }
9884
9885 /* Get and add the displacement. */
9886 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9887 {
9888 case 0:
9889 break;
9890 case 1:
9891 {
9892 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9893 u32EffAddr += i8Disp;
9894 break;
9895 }
9896 case 2:
9897 {
9898 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9899 u32EffAddr += u32Disp;
9900 break;
9901 }
9902 default:
9903 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9904 }
9905
9906 }
9907 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9908 *pGCPtrEff = u32EffAddr;
9909 else
9910 {
9911 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9912 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9913 }
9914 }
9915 }
9916 else
9917 {
9918 uint64_t u64EffAddr;
9919
9920 /* Handle the rip+disp32 form with no registers first. */
9921 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9922 {
9923 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9924 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9925 }
9926 else
9927 {
9928 /* Get the register (or SIB) value. */
9929 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9930 {
9931 case 0: u64EffAddr = pCtx->rax; break;
9932 case 1: u64EffAddr = pCtx->rcx; break;
9933 case 2: u64EffAddr = pCtx->rdx; break;
9934 case 3: u64EffAddr = pCtx->rbx; break;
9935 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9936 case 6: u64EffAddr = pCtx->rsi; break;
9937 case 7: u64EffAddr = pCtx->rdi; break;
9938 case 8: u64EffAddr = pCtx->r8; break;
9939 case 9: u64EffAddr = pCtx->r9; break;
9940 case 10: u64EffAddr = pCtx->r10; break;
9941 case 11: u64EffAddr = pCtx->r11; break;
9942 case 13: u64EffAddr = pCtx->r13; break;
9943 case 14: u64EffAddr = pCtx->r14; break;
9944 case 15: u64EffAddr = pCtx->r15; break;
9945 /* SIB */
9946 case 4:
9947 case 12:
9948 {
9949 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9950
9951 /* Get the index and scale it. */
9952 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9953 {
9954 case 0: u64EffAddr = pCtx->rax; break;
9955 case 1: u64EffAddr = pCtx->rcx; break;
9956 case 2: u64EffAddr = pCtx->rdx; break;
9957 case 3: u64EffAddr = pCtx->rbx; break;
9958 case 4: u64EffAddr = 0; /*none */ break;
9959 case 5: u64EffAddr = pCtx->rbp; break;
9960 case 6: u64EffAddr = pCtx->rsi; break;
9961 case 7: u64EffAddr = pCtx->rdi; break;
9962 case 8: u64EffAddr = pCtx->r8; break;
9963 case 9: u64EffAddr = pCtx->r9; break;
9964 case 10: u64EffAddr = pCtx->r10; break;
9965 case 11: u64EffAddr = pCtx->r11; break;
9966 case 12: u64EffAddr = pCtx->r12; break;
9967 case 13: u64EffAddr = pCtx->r13; break;
9968 case 14: u64EffAddr = pCtx->r14; break;
9969 case 15: u64EffAddr = pCtx->r15; break;
9970 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9971 }
9972 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9973
9974 /* add base */
9975 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9976 {
9977 case 0: u64EffAddr += pCtx->rax; break;
9978 case 1: u64EffAddr += pCtx->rcx; break;
9979 case 2: u64EffAddr += pCtx->rdx; break;
9980 case 3: u64EffAddr += pCtx->rbx; break;
9981 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9982 case 6: u64EffAddr += pCtx->rsi; break;
9983 case 7: u64EffAddr += pCtx->rdi; break;
9984 case 8: u64EffAddr += pCtx->r8; break;
9985 case 9: u64EffAddr += pCtx->r9; break;
9986 case 10: u64EffAddr += pCtx->r10; break;
9987 case 11: u64EffAddr += pCtx->r11; break;
9988 case 12: u64EffAddr += pCtx->r12; break;
9989 case 14: u64EffAddr += pCtx->r14; break;
9990 case 15: u64EffAddr += pCtx->r15; break;
9991 /* complicated encodings */
9992 case 5:
9993 case 13:
9994 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9995 {
9996 if (!pIemCpu->uRexB)
9997 {
9998 u64EffAddr += pCtx->rbp;
9999 SET_SS_DEF();
10000 }
10001 else
10002 u64EffAddr += pCtx->r13;
10003 }
10004 else
10005 {
10006 uint32_t u32Disp;
10007 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
10008 u64EffAddr += (int32_t)u32Disp;
10009 }
10010 break;
10011 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10012 }
10013 break;
10014 }
10015 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10016 }
10017
10018 /* Get and add the displacement. */
10019 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
10020 {
10021 case 0:
10022 break;
10023 case 1:
10024 {
10025 int8_t i8Disp;
10026 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
10027 u64EffAddr += i8Disp;
10028 break;
10029 }
10030 case 2:
10031 {
10032 uint32_t u32Disp;
10033 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
10034 u64EffAddr += (int32_t)u32Disp;
10035 break;
10036 }
10037 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
10038 }
10039
10040 }
10041
10042 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
10043 *pGCPtrEff = u64EffAddr;
10044 else
10045 {
10046 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
10047 *pGCPtrEff = u64EffAddr & UINT32_MAX;
10048 }
10049 }
10050
10051 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
10052 return VINF_SUCCESS;
10053}
10054
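/*
 * Worked example for the 16-bit path above: for "mov ax, [bx+si+0x10]" the
 * ModR/M byte is 0x40 (mod=1, reg=0, rm=0). The mod=1 case fetches the
 * sign-extended disp8 0x10 and the rm=0 case then adds bx+si, so with
 * bx=0x1000 and si=0x0020 the function returns GCPtrEff = 0x1030 and leaves
 * DS as the default segment (SET_SS_DEF only fires for the bp-based
 * encodings).
 */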
10055/** @} */
10056
10057
10058
10059/*
10060 * Include the instructions
10061 */
10062#include "IEMAllInstructions.cpp.h"
10063
10064
10065
10066
10067#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10068
10069/**
10070 * Sets up execution verification mode.
10071 */
10072IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
10073{
10074 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10075 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
10076
10077 /*
10078 * Always note down the address of the current instruction.
10079 */
10080 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
10081 pIemCpu->uOldRip = pOrgCtx->rip;
10082
10083 /*
10084 * Enable verification and/or logging.
10085 */
10086 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
10087 if ( fNewNoRem
10088 && ( 0
10089#if 0 /* auto enable on first paged protected mode interrupt */
10090 || ( pOrgCtx->eflags.Bits.u1IF
10091 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
10092 && TRPMHasTrap(pVCpu)
10093 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
10094#endif
10095#if 0
10096 || ( pOrgCtx->cs.Sel == 0x10
10097 && ( pOrgCtx->rip == 0x90119e3e
10098 || pOrgCtx->rip == 0x901d9810))
10099#endif
10100#if 0 /* Auto enable DSL - FPU stuff. */
10101 || ( pOrgCtx->cs == 0x10
10102 && (// pOrgCtx->rip == 0xc02ec07f
10103 //|| pOrgCtx->rip == 0xc02ec082
10104 //|| pOrgCtx->rip == 0xc02ec0c9
10105 0
10106 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
10107#endif
10108#if 0 /* Auto enable DSL - fstp st0 stuff. */
10109 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
10110#endif
10111#if 0
10112 || pOrgCtx->rip == 0x9022bb3a
10113#endif
10114#if 0
10115 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
10116#endif
10117#if 0
10118 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
10119 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
10120#endif
10121#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
10122 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
10123 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
10124 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
10125#endif
10126#if 0 /* NT4SP1 - xadd early boot. */
10127 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
10128#endif
10129#if 0 /* NT4SP1 - wrmsr (intel MSR). */
10130 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
10131#endif
10132#if 0 /* NT4SP1 - cmpxchg (AMD). */
10133 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
10134#endif
10135#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
10136 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
10137#endif
10138#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
10139 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
10140
10141#endif
10142#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
10143 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
10144
10145#endif
10146#if 0 /* NT4SP1 - frstor [ecx] */
10147 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
10148#endif
10149#if 0 /* xxxxxx - All long mode code. */
10150 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
10151#endif
10152#if 0 /* rep movsq linux 3.7 64-bit boot. */
10153 || (pOrgCtx->rip == 0x0000000000100241)
10154#endif
10155#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
10156 || (pOrgCtx->rip == 0x000000000215e240)
10157#endif
10158#if 0 /* DOS's size-overridden iret to v8086. */
10159 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
10160#endif
10161 )
10162 )
10163 {
10164 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
10165 RTLogFlags(NULL, "enabled");
10166 fNewNoRem = false;
10167 }
10168 if (fNewNoRem != pIemCpu->fNoRem)
10169 {
10170 pIemCpu->fNoRem = fNewNoRem;
10171 if (!fNewNoRem)
10172 {
10173 LogAlways(("Enabling verification mode!\n"));
10174 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
10175 }
10176 else
10177 LogAlways(("Disabling verification mode!\n"));
10178 }
10179
10180 /*
10181 * Switch state.
10182 */
10183 if (IEM_VERIFICATION_ENABLED(pIemCpu))
10184 {
10185 static CPUMCTX s_DebugCtx; /* Ugly! */
10186
10187 s_DebugCtx = *pOrgCtx;
10188 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
10189 }
10190
10191 /*
10192 * See if there is an interrupt pending in TRPM and inject it if we can.
10193 */
10194 pIemCpu->uInjectCpl = UINT8_MAX;
10195 if ( pOrgCtx->eflags.Bits.u1IF
10196 && TRPMHasTrap(pVCpu)
10197 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
10198 {
10199 uint8_t u8TrapNo;
10200 TRPMEVENT enmType;
10201 RTGCUINT uErrCode;
10202 RTGCPTR uCr2;
10203 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
10204 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10205 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10206 TRPMResetTrap(pVCpu);
10207 pIemCpu->uInjectCpl = pIemCpu->uCpl;
10208 }
10209
10210 /*
10211 * Reset the counters.
10212 */
10213 pIemCpu->cIOReads = 0;
10214 pIemCpu->cIOWrites = 0;
10215 pIemCpu->fIgnoreRaxRdx = false;
10216 pIemCpu->fOverlappingMovs = false;
10217 pIemCpu->fProblematicMemory = false;
10218 pIemCpu->fUndefinedEFlags = 0;
10219
10220 if (IEM_VERIFICATION_ENABLED(pIemCpu))
10221 {
10222 /*
10223 * Free all verification records.
10224 */
10225 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
10226 pIemCpu->pIemEvtRecHead = NULL;
10227 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
10228 do
10229 {
10230 while (pEvtRec)
10231 {
10232 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
10233 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
10234 pIemCpu->pFreeEvtRec = pEvtRec;
10235 pEvtRec = pNext;
10236 }
10237 pEvtRec = pIemCpu->pOtherEvtRecHead;
10238 pIemCpu->pOtherEvtRecHead = NULL;
10239 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
10240 } while (pEvtRec);
10241 }
10242}
10243
10244
10245/**
10246 * Allocate an event record.
10247 * @returns Pointer to a record.
10248 */
10249IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
10250{
10251 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10252 return NULL;
10253
10254 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
10255 if (pEvtRec)
10256 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
10257 else
10258 {
10259 if (!pIemCpu->ppIemEvtRecNext)
10260 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
10261
10262 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
10263 if (!pEvtRec)
10264 return NULL;
10265 }
10266 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
10267 pEvtRec->pNext = NULL;
10268 return pEvtRec;
10269}
10270
10271
10272/**
10273 * IOMMMIORead notification.
10274 */
10275VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
10276{
10277 PVMCPU pVCpu = VMMGetCpu(pVM);
10278 if (!pVCpu)
10279 return;
10280 PIEMCPU pIemCpu = &pVCpu->iem.s;
10281 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10282 if (!pEvtRec)
10283 return;
10284 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
10285 pEvtRec->u.RamRead.GCPhys = GCPhys;
10286 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
10287 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10288 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10289}
10290
10291
10292/**
10293 * IOMMMIOWrite notification.
10294 */
10295VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
10296{
10297 PVMCPU pVCpu = VMMGetCpu(pVM);
10298 if (!pVCpu)
10299 return;
10300 PIEMCPU pIemCpu = &pVCpu->iem.s;
10301 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10302 if (!pEvtRec)
10303 return;
10304 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
10305 pEvtRec->u.RamWrite.GCPhys = GCPhys;
10306 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
10307 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
10308 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
10309 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
10310 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
10311 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10312 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10313}
10314
10315
10316/**
10317 * IOMIOPortRead notification.
10318 */
10319VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
10320{
10321 PVMCPU pVCpu = VMMGetCpu(pVM);
10322 if (!pVCpu)
10323 return;
10324 PIEMCPU pIemCpu = &pVCpu->iem.s;
10325 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10326 if (!pEvtRec)
10327 return;
10328 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10329 pEvtRec->u.IOPortRead.Port = Port;
10330 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10331 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10332 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10333}
10334
10335/**
10336 * IOMIOPortWrite notification.
10337 */
10338VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10339{
10340 PVMCPU pVCpu = VMMGetCpu(pVM);
10341 if (!pVCpu)
10342 return;
10343 PIEMCPU pIemCpu = &pVCpu->iem.s;
10344 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10345 if (!pEvtRec)
10346 return;
10347 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10348 pEvtRec->u.IOPortWrite.Port = Port;
10349 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10350 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10351 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10352 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10353}
10354
10355
10356VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10357{
10358 PVMCPU pVCpu = VMMGetCpu(pVM);
10359 if (!pVCpu)
10360 return;
10361 PIEMCPU pIemCpu = &pVCpu->iem.s;
10362 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10363 if (!pEvtRec)
10364 return;
10365 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
10366 pEvtRec->u.IOPortStrRead.Port = Port;
10367 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
10368 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
10369 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10370 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10371}
10372
10373
10374VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10375{
10376 PVMCPU pVCpu = VMMGetCpu(pVM);
10377 if (!pVCpu)
10378 return;
10379 PIEMCPU pIemCpu = &pVCpu->iem.s;
10380 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10381 if (!pEvtRec)
10382 return;
10383 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
10384 pEvtRec->u.IOPortStrWrite.Port = Port;
10385 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
10386 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
10387 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10388 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10389}
10390
10391
10392/**
10393 * Fakes and records an I/O port read.
10394 *
10395 * @returns VINF_SUCCESS.
10396 * @param pIemCpu The IEM per CPU data.
10397 * @param Port The I/O port.
10398 * @param pu32Value Where to store the fake value.
10399 * @param cbValue The size of the access.
10400 */
10401IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10402{
10403 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10404 if (pEvtRec)
10405 {
10406 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10407 pEvtRec->u.IOPortRead.Port = Port;
10408 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10409 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10410 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10411 }
10412 pIemCpu->cIOReads++;
10413 *pu32Value = 0xcccccccc;
10414 return VINF_SUCCESS;
10415}
10416
10417
10418/**
10419 * Fakes and records an I/O port write.
10420 *
10421 * @returns VINF_SUCCESS.
10422 * @param pIemCpu The IEM per CPU data.
10423 * @param Port The I/O port.
10424 * @param u32Value The value being written.
10425 * @param cbValue The size of the access.
10426 */
10427IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10428{
10429 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10430 if (pEvtRec)
10431 {
10432 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10433 pEvtRec->u.IOPortWrite.Port = Port;
10434 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10435 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10436 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10437 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10438 }
10439 pIemCpu->cIOWrites++;
10440 return VINF_SUCCESS;
10441}
10442
10443
10444/**
10445 * Used to add extra details about a stub case.
10446 * @param pIemCpu The IEM per CPU state.
10447 */
10448IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10449{
10450 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10451 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10452 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10453 char szRegs[4096];
10454 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10455 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10456 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10457 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10458 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10459 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10460 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10461 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10462 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10463 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10464 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10465 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10466 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10467 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10468 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10469 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10470 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10471 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10472 " efer=%016VR{efer}\n"
10473 " pat=%016VR{pat}\n"
10474 " sf_mask=%016VR{sf_mask}\n"
10475 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10476 " lstar=%016VR{lstar}\n"
10477 " star=%016VR{star} cstar=%016VR{cstar}\n"
10478 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10479 );
10480
10481 char szInstr1[256];
10482 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10483 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10484 szInstr1, sizeof(szInstr1), NULL);
10485 char szInstr2[256];
10486 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10487 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10488 szInstr2, sizeof(szInstr2), NULL);
10489
10490 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10491}
10492
10493
10494/**
10495 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10496 * dump to the assertion info.
10497 *
10498 * @param pEvtRec The record to dump.
10499 */
10500IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10501{
10502 switch (pEvtRec->enmEvent)
10503 {
10504 case IEMVERIFYEVENT_IOPORT_READ:
10505 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10506 pEvtRec->u.IOPortRead.Port,
10507 pEvtRec->u.IOPortRead.cbValue);
10508 break;
10509 case IEMVERIFYEVENT_IOPORT_WRITE:
10510 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10511 pEvtRec->u.IOPortWrite.Port,
10512 pEvtRec->u.IOPortWrite.cbValue,
10513 pEvtRec->u.IOPortWrite.u32Value);
10514 break;
10515 case IEMVERIFYEVENT_IOPORT_STR_READ:
10516 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
10517 pEvtRec->u.IOPortStrRead.Port,
10518 pEvtRec->u.IOPortStrRead.cbValue,
10519 pEvtRec->u.IOPortStrRead.cTransfers);
10520 break;
10521 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10522 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
10523 pEvtRec->u.IOPortStrWrite.Port,
10524 pEvtRec->u.IOPortStrWrite.cbValue,
10525 pEvtRec->u.IOPortStrWrite.cTransfers);
10526 break;
10527 case IEMVERIFYEVENT_RAM_READ:
10528 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10529 pEvtRec->u.RamRead.GCPhys,
10530 pEvtRec->u.RamRead.cb);
10531 break;
10532 case IEMVERIFYEVENT_RAM_WRITE:
10533 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10534 pEvtRec->u.RamWrite.GCPhys,
10535 pEvtRec->u.RamWrite.cb,
10536 (int)pEvtRec->u.RamWrite.cb,
10537 pEvtRec->u.RamWrite.ab);
10538 break;
10539 default:
10540 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10541 break;
10542 }
10543}
10544
10545
10546/**
10547 * Raises an assertion on the specified record, showing the given message with
10548 * a record dump attached.
10549 *
10550 * @param pIemCpu The IEM per CPU data.
10551 * @param pEvtRec1 The first record.
10552 * @param pEvtRec2 The second record.
10553 * @param pszMsg The message explaining why we're asserting.
10554 */
10555IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10556{
10557 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10558 iemVerifyAssertAddRecordDump(pEvtRec1);
10559 iemVerifyAssertAddRecordDump(pEvtRec2);
10560 iemVerifyAssertMsg2(pIemCpu);
10561 RTAssertPanic();
10562}
10563
10564
10565/**
10566 * Raises an assertion on the specified record, showing the given message with
10567 * a record dump attached.
10568 *
10569 * @param pIemCpu The IEM per CPU data.
10570 * @param pEvtRec1 The first record.
10571 * @param pEvtRec The record.
10572 */
10573IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10574{
10575 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10576 iemVerifyAssertAddRecordDump(pEvtRec);
10577 iemVerifyAssertMsg2(pIemCpu);
10578 RTAssertPanic();
10579}
10580
10581
10582/**
10583 * Verifies a write record.
10584 *
10585 * @param pIemCpu The IEM per CPU data.
10586 * @param pEvtRec The write record.
10587 * @param fRem Set if REM was doing the other execution. If clear,
10588 * it was HM.
10589 */
10590IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10591{
10592 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10593 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10594 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10595 if ( RT_FAILURE(rc)
10596 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10597 {
10598 /* fend off ins */
10599 if ( !pIemCpu->cIOReads
10600 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10601 || ( pEvtRec->u.RamWrite.cb != 1
10602 && pEvtRec->u.RamWrite.cb != 2
10603 && pEvtRec->u.RamWrite.cb != 4) )
10604 {
10605 /* fend off ROMs and MMIO */
10606 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10607 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10608 {
10609 /* fend off fxsave */
10610 if (pEvtRec->u.RamWrite.cb != 512)
10611 {
10612 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10613 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10614 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10615 RTAssertMsg2Add("%s: %.*Rhxs\n"
10616 "iem: %.*Rhxs\n",
10617 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10618 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10619 iemVerifyAssertAddRecordDump(pEvtRec);
10620 iemVerifyAssertMsg2(pIemCpu);
10621 RTAssertPanic();
10622 }
10623 }
10624 }
10625 }
10626
10627}
10628
10629/**
10630 * Performs the post-execution verification checks.
10631 */
10632IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrictIem)
10633{
10634 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10635 return rcStrictIem;
10636
10637 /*
10638 * Switch back the state.
10639 */
10640 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10641 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10642 Assert(pOrgCtx != pDebugCtx);
10643 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10644
10645 /*
10646 * Execute the instruction in REM.
10647 */
10648 bool fRem = false;
10649 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10650 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10651 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10652#ifdef IEM_VERIFICATION_MODE_FULL_HM
10653 if ( HMIsEnabled(pVM)
10654 && pIemCpu->cIOReads == 0
10655 && pIemCpu->cIOWrites == 0
10656 && !pIemCpu->fProblematicMemory)
10657 {
10658 uint64_t uStartRip = pOrgCtx->rip;
10659 unsigned iLoops = 0;
10660 do
10661 {
10662 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10663 iLoops++;
10664 } while ( rc == VINF_SUCCESS
10665 || ( rc == VINF_EM_DBG_STEPPED
10666 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10667 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10668 || ( pOrgCtx->rip != pDebugCtx->rip
10669 && pIemCpu->uInjectCpl != UINT8_MAX
10670 && iLoops < 8) );
10671 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10672 rc = VINF_SUCCESS;
10673 }
10674#endif
10675 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10676 || rc == VINF_IOM_R3_IOPORT_READ
10677 || rc == VINF_IOM_R3_IOPORT_WRITE
10678 || rc == VINF_IOM_R3_MMIO_READ
10679 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10680 || rc == VINF_IOM_R3_MMIO_WRITE
10681 || rc == VINF_CPUM_R3_MSR_READ
10682 || rc == VINF_CPUM_R3_MSR_WRITE
10683 || rc == VINF_EM_RESCHEDULE
10684 )
10685 {
10686 EMRemLock(pVM);
10687 rc = REMR3EmulateInstruction(pVM, pVCpu);
10688 AssertRC(rc);
10689 EMRemUnlock(pVM);
10690 fRem = true;
10691 }
10692
10693# if 1 /* Skip unimplemented instructions for now. */
10694 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10695 {
10696 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10697 if (rc == VINF_EM_DBG_STEPPED)
10698 return VINF_SUCCESS;
10699 return rc;
10700 }
10701# endif
10702
10703 /*
10704 * Compare the register states.
10705 */
10706 unsigned cDiffs = 0;
10707 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10708 {
10709 //Log(("REM and IEM ends up with different registers!\n"));
10710 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10711
10712# define CHECK_FIELD(a_Field) \
10713 do \
10714 { \
10715 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10716 { \
10717 switch (sizeof(pOrgCtx->a_Field)) \
10718 { \
10719 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10720 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10721 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10722 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10723 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10724 } \
10725 cDiffs++; \
10726 } \
10727 } while (0)
10728# define CHECK_XSTATE_FIELD(a_Field) \
10729 do \
10730 { \
10731 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10732 { \
10733 switch (sizeof(pOrgXState->a_Field)) \
10734 { \
10735 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10736 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10737 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10738 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10739 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10740 } \
10741 cDiffs++; \
10742 } \
10743 } while (0)
10744
10745# define CHECK_BIT_FIELD(a_Field) \
10746 do \
10747 { \
10748 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10749 { \
10750 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10751 cDiffs++; \
10752 } \
10753 } while (0)
10754
10755# define CHECK_SEL(a_Sel) \
10756 do \
10757 { \
10758 CHECK_FIELD(a_Sel.Sel); \
10759 CHECK_FIELD(a_Sel.Attr.u); \
10760 CHECK_FIELD(a_Sel.u64Base); \
10761 CHECK_FIELD(a_Sel.u32Limit); \
10762 CHECK_FIELD(a_Sel.fFlags); \
10763 } while (0)
10764
10765 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10766 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10767
10768#if 1 /* The recompiler doesn't update these the intel way. */
10769 if (fRem)
10770 {
10771 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10772 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10773 pOrgXState->x87.CS = pDebugXState->x87.CS;
10774 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10775 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10776 pOrgXState->x87.DS = pDebugXState->x87.DS;
10777 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10778 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10779 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10780 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10781 }
10782#endif
10783 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10784 {
10785 RTAssertMsg2Weak(" the FPU state differs\n");
10786 cDiffs++;
10787 CHECK_XSTATE_FIELD(x87.FCW);
10788 CHECK_XSTATE_FIELD(x87.FSW);
10789 CHECK_XSTATE_FIELD(x87.FTW);
10790 CHECK_XSTATE_FIELD(x87.FOP);
10791 CHECK_XSTATE_FIELD(x87.FPUIP);
10792 CHECK_XSTATE_FIELD(x87.CS);
10793 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10794 CHECK_XSTATE_FIELD(x87.FPUDP);
10795 CHECK_XSTATE_FIELD(x87.DS);
10796 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10797 CHECK_XSTATE_FIELD(x87.MXCSR);
10798 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10799 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10800 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10801 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10802 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10803 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10804 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10805 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10806 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10807 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10808 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10809 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10810 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10811 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10812 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10813 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10814 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10815 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10816 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10817 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10818 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10819 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10820 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10821 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10822 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10823 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10824 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10825 }
10826 CHECK_FIELD(rip);
10827 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10828 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10829 {
10830 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10831 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10832 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10833 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10834 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10835 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10836 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10837 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10838 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10839 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10840 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10841 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10842 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10843 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10844 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10845 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10846            if (0 && !fRem) /** @todo debug the occasionally cleared RF flag when running against VT-x. */
10847 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10848 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10849 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10850 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10851 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10852 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10853 }
10854
10855 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10856 CHECK_FIELD(rax);
10857 CHECK_FIELD(rcx);
10858 if (!pIemCpu->fIgnoreRaxRdx)
10859 CHECK_FIELD(rdx);
10860 CHECK_FIELD(rbx);
10861 CHECK_FIELD(rsp);
10862 CHECK_FIELD(rbp);
10863 CHECK_FIELD(rsi);
10864 CHECK_FIELD(rdi);
10865 CHECK_FIELD(r8);
10866 CHECK_FIELD(r9);
10867 CHECK_FIELD(r10);
10868 CHECK_FIELD(r11);
10869 CHECK_FIELD(r12);
10870 CHECK_FIELD(r13);
10871 CHECK_SEL(cs);
10872 CHECK_SEL(ss);
10873 CHECK_SEL(ds);
10874 CHECK_SEL(es);
10875 CHECK_SEL(fs);
10876 CHECK_SEL(gs);
10877 CHECK_FIELD(cr0);
10878
10879        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10880           the faulting instruction first: 001b:77f61ff3 66 8b 42 02   mov ax, word [edx+002h] (NT4SP1) */
10881        /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
10882           while REM reports the address of the first byte on the page.  Pending investigation as to which is correct. */
10883 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10884 {
10885 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10886 { /* ignore */ }
10887 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10888 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10889 && fRem)
10890 { /* ignore */ }
10891 else
10892 CHECK_FIELD(cr2);
10893 }
10894 CHECK_FIELD(cr3);
10895 CHECK_FIELD(cr4);
10896 CHECK_FIELD(dr[0]);
10897 CHECK_FIELD(dr[1]);
10898 CHECK_FIELD(dr[2]);
10899 CHECK_FIELD(dr[3]);
10900 CHECK_FIELD(dr[6]);
10901 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10902 CHECK_FIELD(dr[7]);
10903 CHECK_FIELD(gdtr.cbGdt);
10904 CHECK_FIELD(gdtr.pGdt);
10905 CHECK_FIELD(idtr.cbIdt);
10906 CHECK_FIELD(idtr.pIdt);
10907 CHECK_SEL(ldtr);
10908 CHECK_SEL(tr);
10909 CHECK_FIELD(SysEnter.cs);
10910 CHECK_FIELD(SysEnter.eip);
10911 CHECK_FIELD(SysEnter.esp);
10912 CHECK_FIELD(msrEFER);
10913 CHECK_FIELD(msrSTAR);
10914 CHECK_FIELD(msrPAT);
10915 CHECK_FIELD(msrLSTAR);
10916 CHECK_FIELD(msrCSTAR);
10917 CHECK_FIELD(msrSFMASK);
10918 CHECK_FIELD(msrKERNELGSBASE);
10919
10920 if (cDiffs != 0)
10921 {
10922 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10923 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10924 RTAssertPanic();
10925 static bool volatile s_fEnterDebugger = true;
10926 if (s_fEnterDebugger)
10927 DBGFSTOP(pVM);
10928
10929# if 1 /* Ignore unimplemented instructions for now. */
10930 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10931 rcStrictIem = VINF_SUCCESS;
10932# endif
10933 }
10934# undef CHECK_FIELD
10935# undef CHECK_BIT_FIELD
10936 }
10937
10938 /*
10939 * If the register state compared fine, check the verification event
10940 * records.
10941 */
10942 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10943 {
10944 /*
10945         * Compare verification event records.
10946 * - I/O port accesses should be a 1:1 match.
10947 */
10948 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10949 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10950 while (pIemRec && pOtherRec)
10951 {
10952            /* Since we might miss RAM writes and reads, ignore reads and just
10953               verify the written memory for any extra IEM RAM records. */
10954 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10955 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10956 && pIemRec->pNext)
10957 {
10958 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10959 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10960 pIemRec = pIemRec->pNext;
10961 }
10962
10963 /* Do the compare. */
10964 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10965 {
10966 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10967 break;
10968 }
10969 bool fEquals;
10970 switch (pIemRec->enmEvent)
10971 {
10972 case IEMVERIFYEVENT_IOPORT_READ:
10973 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10974 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10975 break;
10976 case IEMVERIFYEVENT_IOPORT_WRITE:
10977 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10978 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10979 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10980 break;
10981 case IEMVERIFYEVENT_IOPORT_STR_READ:
10982 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
10983 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
10984 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
10985 break;
10986 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10987 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
10988 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
10989 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
10990 break;
10991 case IEMVERIFYEVENT_RAM_READ:
10992 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10993 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10994 break;
10995 case IEMVERIFYEVENT_RAM_WRITE:
10996 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10997 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10998 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10999 break;
11000 default:
11001 fEquals = false;
11002 break;
11003 }
11004 if (!fEquals)
11005 {
11006 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
11007 break;
11008 }
11009
11010 /* advance */
11011 pIemRec = pIemRec->pNext;
11012 pOtherRec = pOtherRec->pNext;
11013 }
11014
11015 /* Ignore extra writes and reads. */
11016 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
11017 {
11018 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
11019 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
11020 pIemRec = pIemRec->pNext;
11021 }
11022 if (pIemRec != NULL)
11023 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
11024 else if (pOtherRec != NULL)
11025 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
11026 }
11027 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
11028
11029 return rcStrictIem;
11030}
11031
11032#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
11033
11034/* stubs */
11035IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
11036{
11037 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
11038 return VERR_INTERNAL_ERROR;
11039}
11040
11041IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
11042{
11043 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
11044 return VERR_INTERNAL_ERROR;
11045}
11046
11047#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
11048
11049
11050#ifdef LOG_ENABLED
11051/**
11052 * Logs the current instruction.
11053 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11054 * @param pCtx The current CPU context.
11055 * @param fSameCtx Set if we have the same context information as the VMM,
11056 * clear if we may have already executed an instruction in
11057 * our debug context. When clear, we assume IEMCPU holds
11058 * valid CPU mode info.
11059 */
11060IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
11061{
11062# ifdef IN_RING3
11063 if (LogIs2Enabled())
11064 {
11065 char szInstr[256];
11066 uint32_t cbInstr = 0;
11067 if (fSameCtx)
11068 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
11069 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
11070 szInstr, sizeof(szInstr), &cbInstr);
11071 else
11072 {
11073 uint32_t fFlags = 0;
11074 switch (pVCpu->iem.s.enmCpuMode)
11075 {
11076 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
11077 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
11078 case IEMMODE_16BIT:
11079 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
11080 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
11081 else
11082 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
11083 break;
11084 }
11085 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
11086 szInstr, sizeof(szInstr), &cbInstr);
11087 }
11088
11089 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
11090 Log2(("****\n"
11091 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
11092 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
11093 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
11094 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
11095 " %s\n"
11096 ,
11097 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
11098 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
11099 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
11100 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
11101 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
11102 szInstr));
11103
11104 if (LogIs3Enabled())
11105 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
11106 }
11107 else
11108# endif
11109 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
11110 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
11111}
11112#endif
11113
11114
11115/**
11116 * Makes status code adjustments (pass up from I/O and access handlers)
11117 * as well as maintaining statistics.
11118 *
11119 * @returns Strict VBox status code to pass up.
11120 * @param pIemCpu The IEM per CPU data.
11121 * @param rcStrict The status from executing an instruction.
11122 */
11123DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
11124{
11125 if (rcStrict != VINF_SUCCESS)
11126 {
11127 if (RT_SUCCESS(rcStrict))
11128 {
11129 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
11130 || rcStrict == VINF_IOM_R3_IOPORT_READ
11131 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
11132 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
11133 || rcStrict == VINF_IOM_R3_MMIO_READ
11134 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
11135 || rcStrict == VINF_IOM_R3_MMIO_WRITE
11136 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
11137 || rcStrict == VINF_CPUM_R3_MSR_READ
11138 || rcStrict == VINF_CPUM_R3_MSR_WRITE
11139 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
11140 || rcStrict == VINF_EM_RAW_TO_R3
11141 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
11142 /* raw-mode / virt handlers only: */
11143 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
11144 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
11145 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
11146 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
11147 || rcStrict == VINF_SELM_SYNC_GDT
11148 || rcStrict == VINF_CSAM_PENDING_ACTION
11149 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
11150 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11151/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
11152 int32_t const rcPassUp = pIemCpu->rcPassUp;
11153 if (rcPassUp == VINF_SUCCESS)
11154 pIemCpu->cRetInfStatuses++;
11155 else if ( rcPassUp < VINF_EM_FIRST
11156 || rcPassUp > VINF_EM_LAST
11157 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
11158 {
11159 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
11160 pIemCpu->cRetPassUpStatus++;
11161 rcStrict = rcPassUp;
11162 }
11163 else
11164 {
11165 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
11166 pIemCpu->cRetInfStatuses++;
11167 }
11168 }
11169 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
11170 pIemCpu->cRetAspectNotImplemented++;
11171 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
11172 pIemCpu->cRetInstrNotImplemented++;
11173#ifdef IEM_VERIFICATION_MODE_FULL
11174 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
11175 rcStrict = VINF_SUCCESS;
11176#endif
11177 else
11178 pIemCpu->cRetErrStatuses++;
11179 }
11180 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
11181 {
11182 pIemCpu->cRetPassUpStatus++;
11183 rcStrict = pIemCpu->rcPassUp;
11184 }
11185
11186 return rcStrict;
11187}
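/* Editorial note on the pass-up rule above: an informational rcStrict is
   replaced by pIemCpu->rcPassUp whenever the latter lies outside the EM
   scheduling range (VINF_EM_FIRST..VINF_EM_LAST) or is numerically lower,
   i.e. more urgent, than rcStrict; otherwise the informational status is
   kept and only the cRetInfStatuses/cRetPassUpStatus statistics differ. */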
11188
11189
11190/**
11191 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
11192 * IEMExecOneWithPrefetchedByPC.
11193 *
11194 * @return Strict VBox status code.
11195 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11196 * @param pIemCpu The IEM per CPU data.
11197 * @param fExecuteInhibit If set, execute the instruction following CLI,
11198 * POP SS and MOV SS,GR.
11199 */
11200DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
11201{
11202 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
11203 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
11204 if (rcStrict == VINF_SUCCESS)
11205 pIemCpu->cInstructions++;
11206 if (pIemCpu->cActiveMappings > 0)
11207 iemMemRollback(pIemCpu);
11208//#ifdef DEBUG
11209// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
11210//#endif
11211
11212 /* Execute the next instruction as well if a cli, pop ss or
11213 mov ss, Gr has just completed successfully. */
11214 if ( fExecuteInhibit
11215 && rcStrict == VINF_SUCCESS
11216 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
11217 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
11218 {
11219 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
11220 if (rcStrict == VINF_SUCCESS)
11221 {
11222# ifdef LOG_ENABLED
11223 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
11224# endif
11225 IEM_OPCODE_GET_NEXT_U8(&b);
11226 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
11227 if (rcStrict == VINF_SUCCESS)
11228 pIemCpu->cInstructions++;
11229 if (pIemCpu->cActiveMappings > 0)
11230 iemMemRollback(pIemCpu);
11231 }
11232 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
11233 }
11234
11235 /*
11236 * Return value fiddling, statistics and sanity assertions.
11237 */
11238 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11239
11240 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
11241 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
11242#if defined(IEM_VERIFICATION_MODE_FULL)
11243 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
11244 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
11245 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
11246 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
11247#endif
11248 return rcStrict;
11249}
11250
11251
11252#ifdef IN_RC
11253/**
11254 * Re-enters raw-mode or ensure we return to ring-3.
11255 *
11256 * @returns rcStrict, maybe modified.
11257 * @param pIemCpu The IEM CPU structure.
11258 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11259 * @param pCtx The current CPU context.
11260 * @param   rcStrict    The status code returned by the interpreter.
11261 */
11262DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
11263{
11264 if ( !pIemCpu->fInPatchCode
11265 && ( rcStrict == VINF_SUCCESS
11266 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
11267 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
11268 {
11269 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
11270 CPUMRawEnter(pVCpu);
11271 else
11272 {
11273 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
11274 rcStrict = VINF_EM_RESCHEDULE;
11275 }
11276 }
11277 return rcStrict;
11278}
11279#endif
11280
11281
11282/**
11283 * Execute one instruction.
11284 *
11285 * @return Strict VBox status code.
11286 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11287 */
11288VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
11289{
11290 PIEMCPU pIemCpu = &pVCpu->iem.s;
11291
11292#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11293 if (++pIemCpu->cVerifyDepth == 1)
11294 iemExecVerificationModeSetup(pIemCpu);
11295#endif
11296#ifdef LOG_ENABLED
11297 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11298 iemLogCurInstr(pVCpu, pCtx, true);
11299#endif
11300
11301 /*
11302 * Do the decoding and emulation.
11303 */
11304 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11305 if (rcStrict == VINF_SUCCESS)
11306 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11307
11308#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11309 /*
11310 * Assert some sanity.
11311 */
11312 if (pIemCpu->cVerifyDepth == 1)
11313 rcStrict = iemExecVerificationModeCheck(pIemCpu, rcStrict);
11314 pIemCpu->cVerifyDepth--;
11315#endif
11316#ifdef IN_RC
11317 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11318#endif
11319 if (rcStrict != VINF_SUCCESS)
11320 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11321 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11322 return rcStrict;
11323}
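/*
 * Usage sketch (editorial illustration only, not part of the VMM sources):
 * a ring-3 caller on the EMT might interpret a single guest instruction and
 * hand any non-success status back to EM for scheduling.  The helper name is
 * made up for the example.
 */
#if 0
static VBOXSTRICTRC exampleInterpretOne(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
        Log(("exampleInterpretOne: instruction not implemented by IEM\n"));
    /* Informational statuses (VINF_IOM_R3_*, VINF_EM_*) are returned as-is
       so the caller can reschedule or complete the I/O in ring-3. */
    return rcStrict;
}
#endif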
11324
11325
11326VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11327{
11328 PIEMCPU pIemCpu = &pVCpu->iem.s;
11329 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11330 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11331
11332 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11333 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11334 if (rcStrict == VINF_SUCCESS)
11335 {
11336 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11337 if (pcbWritten)
11338 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11339 }
11340
11341#ifdef IN_RC
11342 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11343#endif
11344 return rcStrict;
11345}
11346
11347
11348VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11349 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11350{
11351 PIEMCPU pIemCpu = &pVCpu->iem.s;
11352 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11353 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11354
11355 VBOXSTRICTRC rcStrict;
11356 if ( cbOpcodeBytes
11357 && pCtx->rip == OpcodeBytesPC)
11358 {
11359 iemInitDecoder(pIemCpu, false);
11360 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11361 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11362 rcStrict = VINF_SUCCESS;
11363 }
11364 else
11365 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11366 if (rcStrict == VINF_SUCCESS)
11367 {
11368 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11369 }
11370
11371#ifdef IN_RC
11372 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11373#endif
11374 return rcStrict;
11375}
11376
11377
11378VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11379{
11380 PIEMCPU pIemCpu = &pVCpu->iem.s;
11381 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11382 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11383
11384 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11385 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11386 if (rcStrict == VINF_SUCCESS)
11387 {
11388 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11389 if (pcbWritten)
11390 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11391 }
11392
11393#ifdef IN_RC
11394 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11395#endif
11396 return rcStrict;
11397}
11398
11399
11400VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11401 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11402{
11403 PIEMCPU pIemCpu = &pVCpu->iem.s;
11404 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11405 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11406
11407 VBOXSTRICTRC rcStrict;
11408 if ( cbOpcodeBytes
11409 && pCtx->rip == OpcodeBytesPC)
11410 {
11411 iemInitDecoder(pIemCpu, true);
11412 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11413 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11414 rcStrict = VINF_SUCCESS;
11415 }
11416 else
11417 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11418 if (rcStrict == VINF_SUCCESS)
11419 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11420
11421#ifdef IN_RC
11422 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11423#endif
11424 return rcStrict;
11425}
11426
11427
11428VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11429{
11430 PIEMCPU pIemCpu = &pVCpu->iem.s;
11431
11432 /*
11433 * See if there is an interrupt pending in TRPM and inject it if we can.
11434 */
11435#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11436 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11437# ifdef IEM_VERIFICATION_MODE_FULL
11438 pIemCpu->uInjectCpl = UINT8_MAX;
11439# endif
11440 if ( pCtx->eflags.Bits.u1IF
11441 && TRPMHasTrap(pVCpu)
11442 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11443 {
11444 uint8_t u8TrapNo;
11445 TRPMEVENT enmType;
11446 RTGCUINT uErrCode;
11447 RTGCPTR uCr2;
11448 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11449 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11450 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11451 TRPMResetTrap(pVCpu);
11452 }
11453#else
11454 iemExecVerificationModeSetup(pIemCpu);
11455 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11456#endif
11457
11458 /*
11459 * Log the state.
11460 */
11461#ifdef LOG_ENABLED
11462 iemLogCurInstr(pVCpu, pCtx, true);
11463#endif
11464
11465 /*
11466 * Do the decoding and emulation.
11467 */
11468 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11469 if (rcStrict == VINF_SUCCESS)
11470 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11471
11472#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11473 /*
11474 * Assert some sanity.
11475 */
11476 rcStrict = iemExecVerificationModeCheck(pIemCpu, rcStrict);
11477#endif
11478
11479 /*
11480 * Maybe re-enter raw-mode and log.
11481 */
11482#ifdef IN_RC
11483 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11484#endif
11485 if (rcStrict != VINF_SUCCESS)
11486 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11487 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11488 return rcStrict;
11489}
11490
11491
11492
11493/**
11494 * Injects a trap, fault, abort, software interrupt or external interrupt.
11495 *
11496 * The parameter list matches TRPMQueryTrapAll pretty closely.
11497 *
11498 * @returns Strict VBox status code.
11499 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11500 * @param u8TrapNo The trap number.
11501 * @param enmType What type is it (trap/fault/abort), software
11502 * interrupt or hardware interrupt.
11503 * @param uErrCode The error code if applicable.
11504 * @param uCr2 The CR2 value if applicable.
11505 * @param cbInstr The instruction length (only relevant for
11506 * software interrupts).
11507 */
11508VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11509 uint8_t cbInstr)
11510{
11511 iemInitDecoder(&pVCpu->iem.s, false);
11512#ifdef DBGFTRACE_ENABLED
11513 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11514 u8TrapNo, enmType, uErrCode, uCr2);
11515#endif
11516
11517 uint32_t fFlags;
11518 switch (enmType)
11519 {
11520 case TRPM_HARDWARE_INT:
11521 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11522 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11523 uErrCode = uCr2 = 0;
11524 break;
11525
11526 case TRPM_SOFTWARE_INT:
11527 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11528 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11529 uErrCode = uCr2 = 0;
11530 break;
11531
11532 case TRPM_TRAP:
11533 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11534 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11535 if (u8TrapNo == X86_XCPT_PF)
11536 fFlags |= IEM_XCPT_FLAGS_CR2;
11537 switch (u8TrapNo)
11538 {
11539 case X86_XCPT_DF:
11540 case X86_XCPT_TS:
11541 case X86_XCPT_NP:
11542 case X86_XCPT_SS:
11543 case X86_XCPT_PF:
11544 case X86_XCPT_AC:
11545 fFlags |= IEM_XCPT_FLAGS_ERR;
11546 break;
11547
11548 case X86_XCPT_NMI:
11549 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11550 break;
11551 }
11552 break;
11553
11554 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11555 }
11556
11557 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11558}
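/* Usage sketch (editorial illustration only): reflecting a page fault into
   the guest.  uErr and uFaultAddr are made-up example values; with TRPM_TRAP
   and X86_XCPT_PF the function adds IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2,
   so both the error code and the CR2 value are consumed. */
#if 0
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                                          uErr, uFaultAddr, 0 /*cbInstr*/);
#endif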
11559
11560
11561/**
11562 * Injects the active TRPM event.
11563 *
11564 * @returns Strict VBox status code.
11565 * @param pVCpu The cross context virtual CPU structure.
11566 */
11567VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11568{
11569#ifndef IEM_IMPLEMENTS_TASKSWITCH
11570 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11571#else
11572 uint8_t u8TrapNo;
11573 TRPMEVENT enmType;
11574 RTGCUINT uErrCode;
11575 RTGCUINTPTR uCr2;
11576 uint8_t cbInstr;
11577 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11578 if (RT_FAILURE(rc))
11579 return rc;
11580
11581 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11582
11583 /** @todo Are there any other codes that imply the event was successfully
11584 * delivered to the guest? See @bugref{6607}. */
11585 if ( rcStrict == VINF_SUCCESS
11586 || rcStrict == VINF_IEM_RAISED_XCPT)
11587 {
11588 TRPMResetTrap(pVCpu);
11589 }
11590 return rcStrict;
11591#endif
11592}
11593
11594
11595VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11596{
11597 return VERR_NOT_IMPLEMENTED;
11598}
11599
11600
11601VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11602{
11603 return VERR_NOT_IMPLEMENTED;
11604}
11605
11606
11607#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11608/**
11609 * Executes a IRET instruction with default operand size.
11610 *
11611 * This is for PATM.
11612 *
11613 * @returns VBox status code.
11614 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11615 * @param pCtxCore The register frame.
11616 */
11617VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11618{
11619 PIEMCPU pIemCpu = &pVCpu->iem.s;
11620 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11621
11622 iemCtxCoreToCtx(pCtx, pCtxCore);
11623    iemInitDecoder(pIemCpu, false);
11624 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11625 if (rcStrict == VINF_SUCCESS)
11626 iemCtxToCtxCore(pCtxCore, pCtx);
11627 else
11628 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11629 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11630 return rcStrict;
11631}
11632#endif
11633
11634
11635/**
11636 * Macro used by the IEMExec* methods to check the given instruction length.
11637 *
11638 * Will return on failure!
11639 *
11640 * @param a_cbInstr The given instruction length.
11641 * @param a_cbMin The minimum length.
11642 */
11643#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11644 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11645 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
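/* Worked example (editorial note): with a_cbMin = 1 the check reduces to
   (unsigned)a_cbInstr - 1 <= 14, so by unsigned wrap-around only lengths in
   the range [1, 15] pass; a_cbInstr = 0 wraps to UINT_MAX and fails, as do
   lengths above the architectural 15-byte maximum. */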
11646
11647
11648/**
11649 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
11650 *
11651 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
11652 *
11653 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
11654 * @param pIemCpu The IEM per-CPU structure.
11655 * @param rcStrict The status code to fiddle.
11656 */
11657DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
11658{
11659 iemUninitExec(pIemCpu);
11660#ifdef IN_RC
11661 return iemRCRawMaybeReenter(pIemCpu, IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx),
11662 iemExecStatusCodeFiddling(pIemCpu, rcStrict));
11663#else
11664 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11665#endif
11666}
11667
11668
11669/**
11670 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11671 *
11672 * This API ASSUMES that the caller has already verified that the guest code is
11673 * allowed to access the I/O port. (The I/O port is in the DX register in the
11674 * guest state.)
11675 *
11676 * @returns Strict VBox status code.
11677 * @param pVCpu The cross context virtual CPU structure.
11678 * @param cbValue The size of the I/O port access (1, 2, or 4).
11679 * @param enmAddrMode The addressing mode.
11680 * @param fRepPrefix Indicates whether a repeat prefix is used
11681 * (doesn't matter which for this instruction).
11682 * @param cbInstr The instruction length in bytes.
11683 * @param   iEffSeg             The effective segment register index.
11684 * @param fIoChecked Whether the access to the I/O port has been
11685 * checked or not. It's typically checked in the
11686 * HM scenario.
11687 */
11688VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11689 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
11690{
11691 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11692 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11693
11694 /*
11695 * State init.
11696 */
11697 PIEMCPU pIemCpu = &pVCpu->iem.s;
11698 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11699
11700 /*
11701 * Switch orgy for getting to the right handler.
11702 */
11703 VBOXSTRICTRC rcStrict;
11704 if (fRepPrefix)
11705 {
11706 switch (enmAddrMode)
11707 {
11708 case IEMMODE_16BIT:
11709 switch (cbValue)
11710 {
11711 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11712 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11713 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11714 default:
11715 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11716 }
11717 break;
11718
11719 case IEMMODE_32BIT:
11720 switch (cbValue)
11721 {
11722 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11723 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11724 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11725 default:
11726 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11727 }
11728 break;
11729
11730 case IEMMODE_64BIT:
11731 switch (cbValue)
11732 {
11733 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11734 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11735 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11736 default:
11737 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11738 }
11739 break;
11740
11741 default:
11742 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11743 }
11744 }
11745 else
11746 {
11747 switch (enmAddrMode)
11748 {
11749 case IEMMODE_16BIT:
11750 switch (cbValue)
11751 {
11752 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11753 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11754 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11755 default:
11756 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11757 }
11758 break;
11759
11760 case IEMMODE_32BIT:
11761 switch (cbValue)
11762 {
11763 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11764 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11765 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11766 default:
11767 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11768 }
11769 break;
11770
11771 case IEMMODE_64BIT:
11772 switch (cbValue)
11773 {
11774 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11775 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11776 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, fIoChecked); break;
11777 default:
11778 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11779 }
11780 break;
11781
11782 default:
11783 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11784 }
11785 }
11786
11787 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11788}
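/* Usage sketch (editorial illustration only): how an HM exit handler might
   forward a 'rep outsb' with 32-bit addressing and DS as the effective
   segment.  The instruction length, the X86_SREG_DS index and the assumption
   that the I/O port was already permission-checked are example values. */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
                                                 true /*fRepPrefix*/, 2 /*cbInstr*/,
                                                 X86_SREG_DS, true /*fIoChecked*/);
#endif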
11789
11790
11791/**
11792 * Interface for HM and EM for executing string I/O IN (read) instructions.
11793 *
11794 * This API ASSUMES that the caller has already verified that the guest code is
11795 * allowed to access the I/O port. (The I/O port is in the DX register in the
11796 * guest state.)
11797 *
11798 * @returns Strict VBox status code.
11799 * @param pVCpu The cross context virtual CPU structure.
11800 * @param cbValue The size of the I/O port access (1, 2, or 4).
11801 * @param enmAddrMode The addressing mode.
11802 * @param fRepPrefix Indicates whether a repeat prefix is used
11803 * (doesn't matter which for this instruction).
11804 * @param cbInstr The instruction length in bytes.
11805 * @param fIoChecked Whether the access to the I/O port has been
11806 * checked or not. It's typically checked in the
11807 * HM scenario.
11808 */
11809VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11810 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
11811{
11812 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11813
11814 /*
11815 * State init.
11816 */
11817 PIEMCPU pIemCpu = &pVCpu->iem.s;
11818 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11819
11820 /*
11821 * Switch orgy for getting to the right handler.
11822 */
11823 VBOXSTRICTRC rcStrict;
11824 if (fRepPrefix)
11825 {
11826 switch (enmAddrMode)
11827 {
11828 case IEMMODE_16BIT:
11829 switch (cbValue)
11830 {
11831 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, fIoChecked); break;
11832 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, fIoChecked); break;
11833 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, fIoChecked); break;
11834 default:
11835 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11836 }
11837 break;
11838
11839 case IEMMODE_32BIT:
11840 switch (cbValue)
11841 {
11842 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, fIoChecked); break;
11843 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, fIoChecked); break;
11844 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, fIoChecked); break;
11845 default:
11846 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11847 }
11848 break;
11849
11850 case IEMMODE_64BIT:
11851 switch (cbValue)
11852 {
11853 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, fIoChecked); break;
11854 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, fIoChecked); break;
11855 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, fIoChecked); break;
11856 default:
11857 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11858 }
11859 break;
11860
11861 default:
11862 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11863 }
11864 }
11865 else
11866 {
11867 switch (enmAddrMode)
11868 {
11869 case IEMMODE_16BIT:
11870 switch (cbValue)
11871 {
11872 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, fIoChecked); break;
11873 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, fIoChecked); break;
11874 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, fIoChecked); break;
11875 default:
11876 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11877 }
11878 break;
11879
11880 case IEMMODE_32BIT:
11881 switch (cbValue)
11882 {
11883 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, fIoChecked); break;
11884 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, fIoChecked); break;
11885 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, fIoChecked); break;
11886 default:
11887 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11888 }
11889 break;
11890
11891 case IEMMODE_64BIT:
11892 switch (cbValue)
11893 {
11894 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, fIoChecked); break;
11895 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, fIoChecked); break;
11896 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, fIoChecked); break;
11897 default:
11898 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11899 }
11900 break;
11901
11902 default:
11903 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11904 }
11905 }
11906
11907 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11908}
11909
11910
11911/**
11912 * Interface for raw-mode to execute an OUT instruction.
11913 *
11914 * @returns Strict VBox status code.
11915 * @param pVCpu The cross context virtual CPU structure.
11916 * @param cbInstr The instruction length in bytes.
11917 * @param   u16Port     The port to write to.
11918 * @param cbReg The register size.
11919 *
11920 * @remarks In ring-0 not all of the state needs to be synced in.
11921 */
11922VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
11923{
11924 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11925 Assert(cbReg <= 4 && cbReg != 3);
11926
11927 PIEMCPU pIemCpu = &pVCpu->iem.s;
11928 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11929 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
11930 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11931}
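/* Usage sketch (editorial illustration only): emulating a decoded one-byte
   'out dx, al' where DX held the example port 0x80. */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, 0x80 /*u16Port*/, 1 /*cbReg*/);
#endif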
11932
11933
11934/**
11935 * Interface for raw-mode to execute an IN instruction.
11936 *
11937 * @returns Strict VBox status code.
11938 * @param pVCpu The cross context virtual CPU structure.
11939 * @param cbInstr The instruction length in bytes.
11940 * @param u16Port The port to read.
11941 * @param cbReg The register size.
11942 */
11943VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
11944{
11945 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11946 Assert(cbReg <= 4 && cbReg != 3);
11947
11948 PIEMCPU pIemCpu = &pVCpu->iem.s;
11949 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11950 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
11951 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11952}
11953
11954
11955/**
11956 * Interface for HM and EM to write to a CRx register.
11957 *
11958 * @returns Strict VBox status code.
11959 * @param pVCpu The cross context virtual CPU structure.
11960 * @param cbInstr The instruction length in bytes.
11961 * @param iCrReg The control register number (destination).
11962 * @param iGReg The general purpose register number (source).
11963 *
11964 * @remarks In ring-0 not all of the state needs to be synced in.
11965 */
11966VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11967{
11968 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11969 Assert(iCrReg < 16);
11970 Assert(iGReg < 16);
11971
11972 PIEMCPU pIemCpu = &pVCpu->iem.s;
11973 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11974 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11975 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
11976}
11977
11978
11979/**
11980 * Interface for HM and EM to read from a CRx register.
11981 *
11982 * @returns Strict VBox status code.
11983 * @param pVCpu The cross context virtual CPU structure.
11984 * @param cbInstr The instruction length in bytes.
11985 * @param iGReg The general purpose register number (destination).
11986 * @param iCrReg The control register number (source).
11987 *
11988 * @remarks In ring-0 not all of the state needs to be synced in.
11989 */
11990VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11991{
11992 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11993 Assert(iCrReg < 16);
11994 Assert(iGReg < 16);
11995
11996 PIEMCPU pIemCpu = &pVCpu->iem.s;
11997 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11998 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11999 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
12000}
12001
12002
12003/**
12004 * Interface for HM and EM to clear the CR0[TS] bit.
12005 *
12006 * @returns Strict VBox status code.
12007 * @param pVCpu The cross context virtual CPU structure.
12008 * @param cbInstr The instruction length in bytes.
12009 *
12010 * @remarks In ring-0 not all of the state needs to be synced in.
12011 */
12012VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
12013{
12014 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
12015
12016 PIEMCPU pIemCpu = &pVCpu->iem.s;
12017 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
12018 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
12019 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
12020}
12021
12022
12023/**
12024 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
12025 *
12026 * @returns Strict VBox status code.
12027 * @param pVCpu The cross context virtual CPU structure.
12028 * @param cbInstr The instruction length in bytes.
12029 * @param uValue The value to load into CR0.
12030 *
12031 * @remarks In ring-0 not all of the state needs to be synced in.
12032 */
12033VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
12034{
12035 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
12036
12037 PIEMCPU pIemCpu = &pVCpu->iem.s;
12038 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
12039 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
12040 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
12041}
12042
12043
12044/**
12045 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
12046 *
12047 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
12048 *
12049 * @returns Strict VBox status code.
12050 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
12051 * @param cbInstr The instruction length in bytes.
12052 * @remarks In ring-0 not all of the state needs to be synced in.
12053 * @thread EMT(pVCpu)
12054 */
12055VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
12056{
12057 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
12058
12059 PIEMCPU pIemCpu = &pVCpu->iem.s;
12060 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
12061 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
12062 return iemUninitExecAndFiddleStatusAndMaybeReenter(pIemCpu, rcStrict);
12063}
12064
12065#ifdef IN_RING3
12066
12067/**
12068 * Handles the unlikely and probably fatal merge cases.
12069 *
12070 * @returns Merged status code.
12071 * @param rcStrict Current EM status code.
12072 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
12073 * with @a rcStrict.
12074 * @param iMemMap The memory mapping index. For error reporting only.
12075 * @param pIemCpu The IEMCPU structure of the calling EMT, for error
12076 * reporting only.
12077 */
12078DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
12079 unsigned iMemMap, PIEMCPU pIemCpu)
12080{
12081 if (RT_FAILURE_NP(rcStrict))
12082 return rcStrict;
12083
12084 if (RT_FAILURE_NP(rcStrictCommit))
12085 return rcStrictCommit;
12086
12087 if (rcStrict == rcStrictCommit)
12088 return rcStrictCommit;
12089
12090 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
12091 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
12092 pIemCpu->aMemMappings[iMemMap].fAccess,
12093 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pIemCpu->aMemBbMappings[iMemMap].cbFirst,
12094 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pIemCpu->aMemBbMappings[iMemMap].cbSecond));
12095 return VERR_IOM_FF_STATUS_IPE;
12096}
12097
12098
12099/**
12100 * Helper for IOMR3ProcessForceFlag.
12101 *
12102 * @returns Merged status code.
12103 * @param rcStrict Current EM status code.
12104 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
12105 * with @a rcStrict.
12106 * @param iMemMap The memory mapping index. For error reporting only.
12107 * @param pIemCpu The IEMCPU structure of the calling EMT, for error
12108 * reporting only.
12109 */
12110DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PIEMCPU pIemCpu)
12111{
12112 /* Simple. */
12113 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
12114 return rcStrictCommit;
12115
12116 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
12117 return rcStrict;
12118
12119 /* EM scheduling status codes. */
12120 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
12121 && rcStrict <= VINF_EM_LAST))
12122 {
12123 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
12124 && rcStrictCommit <= VINF_EM_LAST))
12125 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
12126 }
12127
12128 /* Unlikely */
12129 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pIemCpu);
12130}
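/* Editorial note on the precedence implemented above: (1) a VINF_SUCCESS or
   VINF_EM_RAW_TO_R3 instruction status defers to the commit status, (2) a
   VINF_SUCCESS commit status defers to the instruction status, (3) when both
   are EM scheduling codes the numerically lower, i.e. higher priority, one is
   kept, and (4) everything else, including real failures, is sorted out by
   iemR3MergeStatusSlow. */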
12131
12132
12133/**
12134 * Called by force-flag handling code when VMCPU_FF_IEM is set.
12135 *
12136 * @returns Merge between @a rcStrict and what the commit operation returned.
12137 * @param pVM The cross context VM structure.
12138 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
12139 * @param rcStrict The status code returned by ring-0 or raw-mode.
12140 */
12141VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
12142{
12143 PIEMCPU pIemCpu = &pVCpu->iem.s;
12144
12145 /*
12146 * Reset the pending commit.
12147 */
12148 AssertMsg( (pIemCpu->aMemMappings[0].fAccess | pIemCpu->aMemMappings[1].fAccess | pIemCpu->aMemMappings[2].fAccess)
12149 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
12150 ("%#x %#x %#x\n",
12151 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess, pIemCpu->aMemMappings[2].fAccess));
12152 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
12153
12154 /*
12155 * Commit the pending bounce buffers (usually just one).
12156 */
12157 unsigned cBufs = 0;
12158 unsigned iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
12159 while (iMemMap-- > 0)
12160 if (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
12161 {
12162 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
12163 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
12164 Assert(!pIemCpu->aMemBbMappings[iMemMap].fUnassigned);
12165
12166 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
12167 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
12168 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
12169
12170 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
12171 {
12172 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
12173 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
12174 pbBuf,
12175 cbFirst,
12176 PGMACCESSORIGIN_IEM);
12177 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pIemCpu);
12178 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
12179 iMemMap, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
12180 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
12181 }
12182
12183 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
12184 {
12185 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
12186 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
12187 pbBuf + cbFirst,
12188 cbSecond,
12189 PGMACCESSORIGIN_IEM);
12190 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pIemCpu);
12191 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
12192 iMemMap, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
12193 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
12194 }
12195 cBufs++;
12196 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
12197 }
12198
12199 AssertMsg(cBufs > 0 && cBufs == pIemCpu->cActiveMappings,
12200 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pIemCpu->cActiveMappings,
12201 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess, pIemCpu->aMemMappings[2].fAccess));
12202 pIemCpu->cActiveMappings = 0;
12203 return rcStrict;
12204}
12205
12206#endif /* IN_RING3 */
12207