VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 57516

Last change on this file since 57516 was 57432, checked in by vboxsync, 9 years ago

iprt/cdefs.h,*: Split RT_NO_THROW into prototype and definition macros named RT_NO_THROW_PROTO and RT_NO_THROW_DEF respectively.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 442.8 KB
1/* $Id: IEMAll.cpp 57432 2015-08-18 14:57:46Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with the speed goal, as the disassembler chews things a bit too much
37 * and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78#define IEM_IMPLEMENTS_TASKSWITCH
79
80
81/*********************************************************************************************************************************
82* Header Files *
83*********************************************************************************************************************************/
84#define LOG_GROUP LOG_GROUP_IEM
85#include <VBox/vmm/iem.h>
86#include <VBox/vmm/cpum.h>
87#include <VBox/vmm/pdm.h>
88#include <VBox/vmm/pgm.h>
89#include <internal/pgm.h>
90#include <VBox/vmm/iom.h>
91#include <VBox/vmm/em.h>
92#include <VBox/vmm/hm.h>
93#include <VBox/vmm/tm.h>
94#include <VBox/vmm/dbgf.h>
95#include <VBox/vmm/dbgftrace.h>
96#ifdef VBOX_WITH_RAW_MODE_NOT_R0
97# include <VBox/vmm/patm.h>
98# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
99# include <VBox/vmm/csam.h>
100# endif
101#endif
102#include "IEMInternal.h"
103#ifdef IEM_VERIFICATION_MODE_FULL
104# include <VBox/vmm/rem.h>
105# include <VBox/vmm/mm.h>
106#endif
107#include <VBox/vmm/vm.h>
108#include <VBox/log.h>
109#include <VBox/err.h>
110#include <VBox/param.h>
111#include <VBox/dis.h>
112#include <VBox/disopcode.h>
113#include <iprt/assert.h>
114#include <iprt/string.h>
115#include <iprt/x86.h>
116
117
118
119/*********************************************************************************************************************************
120* Structures and Typedefs *
121*********************************************************************************************************************************/
122/** @typedef PFNIEMOP
123 * Pointer to an opcode decoder function.
124 */
125
126/** @def FNIEMOP_DEF
127 * Define an opcode decoder function.
128 *
129 * We're using macros for this so that adding and removing parameters, as well as
130 * tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL.
131 *
132 * @param a_Name The function name.
133 */
134
135
136#if defined(__GNUC__) && defined(RT_ARCH_X86)
137typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
138# define FNIEMOP_DEF(a_Name) \
139 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
140# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
141 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
142# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
143 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
144
145#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
146typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
147# define FNIEMOP_DEF(a_Name) \
148 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
149# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
150 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
151# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
152 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
153
154#elif defined(__GNUC__)
155typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
156# define FNIEMOP_DEF(a_Name) \
157 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
158# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
159 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
160# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
161 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
162
163#else
164typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#endif
173
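/*
 * Illustrative sketch (hypothetical names, for exposition only): how an opcode
 * decoder function is declared with the FNIEMOP_DEF family of macros defined above.
 * @code
 *      FNIEMOP_DEF(iemOp_ExampleNop)
 *      {
 *          // Nothing to decode in this sketch; real decoders fetch their operands
 *          // and advance the instruction pointer before returning.
 *          return VINF_SUCCESS;
 *      }
 *
 *      FNIEMOP_DEF_1(iemOpCommon_ExampleReg, uint8_t, iReg)
 *      {
 *          NOREF(iReg); // the extra argument is supplied by FNIEMOP_CALL_1
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */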
174
175/**
176 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
177 */
178typedef union IEMSELDESC
179{
180 /** The legacy view. */
181 X86DESC Legacy;
182 /** The long mode view. */
183 X86DESC64 Long;
184} IEMSELDESC;
185/** Pointer to a selector descriptor table entry. */
186typedef IEMSELDESC *PIEMSELDESC;
187
188
189/*********************************************************************************************************************************
190* Defined Constants And Macros *
191*********************************************************************************************************************************/
192/** Temporary hack to disable the double execution. Will be removed in favor
193 * of a dedicated execution mode in EM. */
194//#define IEM_VERIFICATION_MODE_NO_REM
195
196/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
197 * due to GCC lacking knowledge about the value range of a switch. */
198#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
199
200/**
201 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
202 * occasion.
203 */
204#ifdef LOG_ENABLED
205# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
206 do { \
207 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
208 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
209 } while (0)
210#else
211# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
212 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
213#endif
214
215/**
216 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
217 * occasion using the supplied logger statement.
218 *
219 * @param a_LoggerArgs What to log on failure.
220 */
221#ifdef LOG_ENABLED
222# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
223 do { \
224 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
225 /*LogFunc(a_LoggerArgs);*/ \
226 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
227 } while (0)
228#else
229# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
230 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
231#endif
232
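/*
 * Illustrative sketch (hypothetical function name, for exposition only): how the
 * not-implemented macros are typically used from a decoder stub.  Note that the
 * macros contain the return statement themselves, so no explicit 'return' is written
 * at the call site.
 * @code
 *      FNIEMOP_DEF(iemOp_ExampleUnimplemented)
 *      {
 *          if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
 *              IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("64-bit variant not implemented\n"));
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 *      }
 * @endcode
 */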
233/**
234 * Call an opcode decoder function.
235 *
236 * We're using macros for this so that adding and removing parameters can be
237 * done as we please. See FNIEMOP_DEF.
238 */
239#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
240
241/**
242 * Call a common opcode decoder function taking one extra argument.
243 *
244 * We're using macros for this so that adding and removing parameters can be
245 * done as we please. See FNIEMOP_DEF_1.
246 */
247#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
248
249/**
250 * Call a common opcode decoder function taking two extra arguments.
251 *
252 * We're using macros for this so that adding and removing parameters can be
253 * done as we please. See FNIEMOP_DEF_2.
254 */
255#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
256
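/*
 * Illustrative sketch (hypothetical fragment, for exposition only): the call macros
 * pair with the definition macros above when dispatching, e.g. fetching an opcode
 * byte and jumping through the one-byte opcode map:
 * @code
 *      uint8_t b;
 *      IEM_OPCODE_GET_NEXT_U8(&b);                 // returns on fetch failure
 *      return FNIEMOP_CALL(g_apfnOneByteMap[b]);   // dispatch to the decoder
 * @endcode
 */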
257/**
258 * Check if we're currently executing in real or virtual 8086 mode.
259 *
260 * @returns @c true if it is, @c false if not.
261 * @param a_pIemCpu The IEM state of the current CPU.
262 */
263#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
264
265/**
266 * Check if we're currently executing in virtual 8086 mode.
267 *
268 * @returns @c true if it is, @c false if not.
269 * @param a_pIemCpu The IEM state of the current CPU.
270 */
271#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
272
273/**
274 * Check if we're currently executing in long mode.
275 *
276 * @returns @c true if it is, @c false if not.
277 * @param a_pIemCpu The IEM state of the current CPU.
278 */
279#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
280
281/**
282 * Check if we're currently executing in real mode.
283 *
284 * @returns @c true if it is, @c false if not.
285 * @param a_pIemCpu The IEM state of the current CPU.
286 */
287#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
288
289/**
290 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
291 * @returns PCCPUMFEATURES
292 * @param a_pIemCpu The IEM state of the current CPU.
293 */
294#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
295
296/**
297 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
298 * @returns PCCPUMFEATURES
299 * @param a_pIemCpu The IEM state of the current CPU.
300 */
301#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
302
303/**
304 * Evaluates to true if we're presenting an Intel CPU to the guest.
305 */
306#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
307
308/**
309 * Evaluates to true if we're presenting an AMD CPU to the guest.
310 */
311#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
312
313/**
314 * Check if the address is canonical.
315 */
316#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
317
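/*
 * Illustrative sketch (hypothetical, for exposition only): the mode predicates above
 * are typically used as guards at the start of an instruction implementation.  The
 * exception raised depends on the instruction; \#GP(0) is used here purely as an example.
 * @code
 *      if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
 *          return iemRaiseGeneralProtectionFault0(pIemCpu);
 * @endcode
 */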
318
319/*********************************************************************************************************************************
320* Global Variables *
321*********************************************************************************************************************************/
322extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
323
324
325/** Function table for the ADD instruction. */
326IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
327{
328 iemAImpl_add_u8, iemAImpl_add_u8_locked,
329 iemAImpl_add_u16, iemAImpl_add_u16_locked,
330 iemAImpl_add_u32, iemAImpl_add_u32_locked,
331 iemAImpl_add_u64, iemAImpl_add_u64_locked
332};
333
334/** Function table for the ADC instruction. */
335IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
336{
337 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
338 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
339 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
340 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
341};
342
343/** Function table for the SUB instruction. */
344IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
345{
346 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
347 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
348 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
349 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
350};
351
352/** Function table for the SBB instruction. */
353IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
354{
355 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
356 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
357 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
358 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
359};
360
361/** Function table for the OR instruction. */
362IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
363{
364 iemAImpl_or_u8, iemAImpl_or_u8_locked,
365 iemAImpl_or_u16, iemAImpl_or_u16_locked,
366 iemAImpl_or_u32, iemAImpl_or_u32_locked,
367 iemAImpl_or_u64, iemAImpl_or_u64_locked
368};
369
370/** Function table for the XOR instruction. */
371IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
372{
373 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
374 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
375 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
376 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
377};
378
379/** Function table for the AND instruction. */
380IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
381{
382 iemAImpl_and_u8, iemAImpl_and_u8_locked,
383 iemAImpl_and_u16, iemAImpl_and_u16_locked,
384 iemAImpl_and_u32, iemAImpl_and_u32_locked,
385 iemAImpl_and_u64, iemAImpl_and_u64_locked
386};
387
388/** Function table for the CMP instruction.
389 * @remarks Making operand order ASSUMPTIONS.
390 */
391IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
392{
393 iemAImpl_cmp_u8, NULL,
394 iemAImpl_cmp_u16, NULL,
395 iemAImpl_cmp_u32, NULL,
396 iemAImpl_cmp_u64, NULL
397};
398
399/** Function table for the TEST instruction.
400 * @remarks Making operand order ASSUMPTIONS.
401 */
402IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
403{
404 iemAImpl_test_u8, NULL,
405 iemAImpl_test_u16, NULL,
406 iemAImpl_test_u32, NULL,
407 iemAImpl_test_u64, NULL
408};
409
410/** Function table for the BT instruction. */
411IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
412{
413 NULL, NULL,
414 iemAImpl_bt_u16, NULL,
415 iemAImpl_bt_u32, NULL,
416 iemAImpl_bt_u64, NULL
417};
418
419/** Function table for the BTC instruction. */
420IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
421{
422 NULL, NULL,
423 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
424 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
425 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
426};
427
428/** Function table for the BTR instruction. */
429IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
430{
431 NULL, NULL,
432 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
433 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
434 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
435};
436
437/** Function table for the BTS instruction. */
438IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
439{
440 NULL, NULL,
441 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
442 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
443 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
444};
445
446/** Function table for the BSF instruction. */
447IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
448{
449 NULL, NULL,
450 iemAImpl_bsf_u16, NULL,
451 iemAImpl_bsf_u32, NULL,
452 iemAImpl_bsf_u64, NULL
453};
454
455/** Function table for the BSR instruction. */
456IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
457{
458 NULL, NULL,
459 iemAImpl_bsr_u16, NULL,
460 iemAImpl_bsr_u32, NULL,
461 iemAImpl_bsr_u64, NULL
462};
463
464/** Function table for the IMUL instruction. */
465IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
466{
467 NULL, NULL,
468 iemAImpl_imul_two_u16, NULL,
469 iemAImpl_imul_two_u32, NULL,
470 iemAImpl_imul_two_u64, NULL
471};
472
473/** Group 1 /r lookup table. */
474IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
475{
476 &g_iemAImpl_add,
477 &g_iemAImpl_or,
478 &g_iemAImpl_adc,
479 &g_iemAImpl_sbb,
480 &g_iemAImpl_and,
481 &g_iemAImpl_sub,
482 &g_iemAImpl_xor,
483 &g_iemAImpl_cmp
484};
485
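/*
 * Illustrative sketch (assumed variable name, for exposition only): the group 1 table
 * is indexed by the reg field (bits 5:3) of the ModR/M byte, which is how the
 * 0x80..0x83 opcodes select between ADD, OR, ADC, SBB, AND, SUB, XOR and CMP.  With
 * bRm holding the ModR/M byte:
 * @code
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];   // ModR/M reg field
 * @endcode
 */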
486/** Function table for the INC instruction. */
487IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
488{
489 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
490 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
491 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
492 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
493};
494
495/** Function table for the DEC instruction. */
496IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
497{
498 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
499 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
500 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
501 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
502};
503
504/** Function table for the NEG instruction. */
505IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
506{
507 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
508 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
509 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
510 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
511};
512
513/** Function table for the NOT instruction. */
514IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
515{
516 iemAImpl_not_u8, iemAImpl_not_u8_locked,
517 iemAImpl_not_u16, iemAImpl_not_u16_locked,
518 iemAImpl_not_u32, iemAImpl_not_u32_locked,
519 iemAImpl_not_u64, iemAImpl_not_u64_locked
520};
521
522
523/** Function table for the ROL instruction. */
524IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
525{
526 iemAImpl_rol_u8,
527 iemAImpl_rol_u16,
528 iemAImpl_rol_u32,
529 iemAImpl_rol_u64
530};
531
532/** Function table for the ROR instruction. */
533IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
534{
535 iemAImpl_ror_u8,
536 iemAImpl_ror_u16,
537 iemAImpl_ror_u32,
538 iemAImpl_ror_u64
539};
540
541/** Function table for the RCL instruction. */
542IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
543{
544 iemAImpl_rcl_u8,
545 iemAImpl_rcl_u16,
546 iemAImpl_rcl_u32,
547 iemAImpl_rcl_u64
548};
549
550/** Function table for the RCR instruction. */
551IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
552{
553 iemAImpl_rcr_u8,
554 iemAImpl_rcr_u16,
555 iemAImpl_rcr_u32,
556 iemAImpl_rcr_u64
557};
558
559/** Function table for the SHL instruction. */
560IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
561{
562 iemAImpl_shl_u8,
563 iemAImpl_shl_u16,
564 iemAImpl_shl_u32,
565 iemAImpl_shl_u64
566};
567
568/** Function table for the SHR instruction. */
569IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
570{
571 iemAImpl_shr_u8,
572 iemAImpl_shr_u16,
573 iemAImpl_shr_u32,
574 iemAImpl_shr_u64
575};
576
577/** Function table for the SAR instruction. */
578IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
579{
580 iemAImpl_sar_u8,
581 iemAImpl_sar_u16,
582 iemAImpl_sar_u32,
583 iemAImpl_sar_u64
584};
585
586
587/** Function table for the MUL instruction. */
588IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
589{
590 iemAImpl_mul_u8,
591 iemAImpl_mul_u16,
592 iemAImpl_mul_u32,
593 iemAImpl_mul_u64
594};
595
596/** Function table for the IMUL instruction working implicitly on rAX. */
597IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
598{
599 iemAImpl_imul_u8,
600 iemAImpl_imul_u16,
601 iemAImpl_imul_u32,
602 iemAImpl_imul_u64
603};
604
605/** Function table for the DIV instruction. */
606IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
607{
608 iemAImpl_div_u8,
609 iemAImpl_div_u16,
610 iemAImpl_div_u32,
611 iemAImpl_div_u64
612};
613
614/** Function table for the IDIV instruction. */
615IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
616{
617 iemAImpl_idiv_u8,
618 iemAImpl_idiv_u16,
619 iemAImpl_idiv_u32,
620 iemAImpl_idiv_u64
621};
622
623/** Function table for the SHLD instruction */
624IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
625{
626 iemAImpl_shld_u16,
627 iemAImpl_shld_u32,
628 iemAImpl_shld_u64,
629};
630
631/** Function table for the SHRD instruction */
632IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
633{
634 iemAImpl_shrd_u16,
635 iemAImpl_shrd_u32,
636 iemAImpl_shrd_u64,
637};
638
639
640/** Function table for the PUNPCKLBW instruction */
641IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
643/** Function table for the PUNPCKLWD instruction */
643IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
644/** Function table for the PUNPCKLDQ instruction */
645IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
646/** Function table for the PUNPCKLQDQ instruction */
647IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
648
649/** Function table for the PUNPCKHBW instruction */
650IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
652/** Function table for the PUNPCKHWD instruction */
652IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
653/** Function table for the PUNPCKHDQ instruction */
654IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
655/** Function table for the PUNPCKHQDQ instruction */
656IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
657
658/** Function table for the PXOR instruction */
659IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
660/** Function table for the PCMPEQB instruction */
661IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
662/** Function table for the PCMPEQW instruction */
663IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
664/** Function table for the PCMPEQD instruction */
665IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
666
667
668#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
669/** What IEM just wrote. */
670uint8_t g_abIemWrote[256];
671/** How much IEM just wrote. */
672size_t g_cbIemWrote;
673#endif
674
675
676/*********************************************************************************************************************************
677* Internal Functions *
678*********************************************************************************************************************************/
679IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
680IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
681IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
682IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
683/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
684IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
685IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
686IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
687IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
688IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
689IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
690IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
691IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
692IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
693IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
694IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
695IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
696IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
698IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
699IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
700IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
701IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
706IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
707IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
708IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
709IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
710IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
711IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
712
713#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
714IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
715#endif
716IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
717IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
718
719
720
721/**
722 * Sets the pass up status.
723 *
724 * @returns VINF_SUCCESS.
725 * @param pIemCpu The per CPU IEM state of the calling thread.
726 * @param rcPassUp The pass up status. Must be informational.
727 * VINF_SUCCESS is not allowed.
728 */
729IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
730{
731 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
732
733 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
734 if (rcOldPassUp == VINF_SUCCESS)
735 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
736 /* If both are EM scheduling codes, use EM priority rules. */
737 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
738 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
739 {
740 if (rcPassUp < rcOldPassUp)
741 {
742 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
743 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
744 }
745 else
746 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
747 }
748 /* Override EM scheduling with specific status code. */
749 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
750 {
751 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
752 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
753 }
754 /* Don't override specific status code, first come first served. */
755 else
756 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
757 return VINF_SUCCESS;
758}
759
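/*
 * Illustrative sketch (assumed buffer variables pvDst/cbDst, for exposition only):
 * callers merge an informational status like this so it is passed up once the
 * instruction completes; compare the PGMPhysRead handling in
 * iemInitDecoderAndPrefetchOpcodes below.
 * @code
 *      VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbDst, PGMACCESSORIGIN_IEM);
 *      if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *          rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
 * @endcode
 */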
760
761/**
762 * Initializes the execution state.
763 *
764 * @param pIemCpu The per CPU IEM state.
765 * @param fBypassHandlers Whether to bypass access handlers.
766 */
767DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
768{
769 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
770 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
771
772 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
773 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
774
775#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
776 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
778 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
780 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
781 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
782 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
783 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
784#endif
785
786#ifdef VBOX_WITH_RAW_MODE_NOT_R0
787 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
788#endif
789 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
790 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
791 ? IEMMODE_64BIT
792 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
793 ? IEMMODE_32BIT
794 : IEMMODE_16BIT;
795 pIemCpu->enmCpuMode = enmMode;
796#ifdef VBOX_STRICT
797 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
798 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
799 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
800 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
801 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
802 pIemCpu->uRexReg = 127;
803 pIemCpu->uRexB = 127;
804 pIemCpu->uRexIndex = 127;
805 pIemCpu->iEffSeg = 127;
806 pIemCpu->offOpcode = 127;
807 pIemCpu->cbOpcode = 127;
808#endif
809
810 pIemCpu->cActiveMappings = 0;
811 pIemCpu->iNextMapping = 0;
812 pIemCpu->rcPassUp = VINF_SUCCESS;
813 pIemCpu->fBypassHandlers = fBypassHandlers;
814#ifdef VBOX_WITH_RAW_MODE_NOT_R0
815 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
816 && pCtx->cs.u64Base == 0
817 && pCtx->cs.u32Limit == UINT32_MAX
818 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
819 if (!pIemCpu->fInPatchCode)
820 CPUMRawLeave(pVCpu, VINF_SUCCESS);
821#endif
822}
823
824
825/**
826 * Initializes the decoder state.
827 *
828 * @param pIemCpu The per CPU IEM state.
829 * @param fBypassHandlers Whether to bypass access handlers.
830 */
831DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
832{
833 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
834 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
835
836 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
837 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
838
839#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
840 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
841 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
842 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
843 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
848#endif
849
850#ifdef VBOX_WITH_RAW_MODE_NOT_R0
851 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
852#endif
853 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
854#ifdef IEM_VERIFICATION_MODE_FULL
855 if (pIemCpu->uInjectCpl != UINT8_MAX)
856 pIemCpu->uCpl = pIemCpu->uInjectCpl;
857#endif
858 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
859 ? IEMMODE_64BIT
860 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
861 ? IEMMODE_32BIT
862 : IEMMODE_16BIT;
863 pIemCpu->enmCpuMode = enmMode;
864 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
865 pIemCpu->enmEffAddrMode = enmMode;
866 if (enmMode != IEMMODE_64BIT)
867 {
868 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
869 pIemCpu->enmEffOpSize = enmMode;
870 }
871 else
872 {
873 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
874 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
875 }
876 pIemCpu->fPrefixes = 0;
877 pIemCpu->uRexReg = 0;
878 pIemCpu->uRexB = 0;
879 pIemCpu->uRexIndex = 0;
880 pIemCpu->iEffSeg = X86_SREG_DS;
881 pIemCpu->offOpcode = 0;
882 pIemCpu->cbOpcode = 0;
883 pIemCpu->cActiveMappings = 0;
884 pIemCpu->iNextMapping = 0;
885 pIemCpu->rcPassUp = VINF_SUCCESS;
886 pIemCpu->fBypassHandlers = fBypassHandlers;
887#ifdef VBOX_WITH_RAW_MODE_NOT_R0
888 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
889 && pCtx->cs.u64Base == 0
890 && pCtx->cs.u32Limit == UINT32_MAX
891 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
892 if (!pIemCpu->fInPatchCode)
893 CPUMRawLeave(pVCpu, VINF_SUCCESS);
894#endif
895
896#ifdef DBGFTRACE_ENABLED
897 switch (enmMode)
898 {
899 case IEMMODE_64BIT:
900 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
901 break;
902 case IEMMODE_32BIT:
903 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
904 break;
905 case IEMMODE_16BIT:
906 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
907 break;
908 }
909#endif
910}
911
912
913/**
914 * Prefetches the opcodes when starting to execute.
915 *
916 * @returns Strict VBox status code.
917 * @param pIemCpu The IEM state.
918 * @param fBypassHandlers Whether to bypass access handlers.
919 */
920IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
921{
922#ifdef IEM_VERIFICATION_MODE_FULL
923 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
924#endif
925 iemInitDecoder(pIemCpu, fBypassHandlers);
926
927 /*
928 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
929 *
930 * First translate CS:rIP to a physical address.
931 */
932 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
933 uint32_t cbToTryRead;
934 RTGCPTR GCPtrPC;
935 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
936 {
937 cbToTryRead = PAGE_SIZE;
938 GCPtrPC = pCtx->rip;
939 if (!IEM_IS_CANONICAL(GCPtrPC))
940 return iemRaiseGeneralProtectionFault0(pIemCpu);
941 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
942 }
943 else
944 {
945 uint32_t GCPtrPC32 = pCtx->eip;
946 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
947 if (GCPtrPC32 > pCtx->cs.u32Limit)
948 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
949 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
950 if (!cbToTryRead) /* overflowed */
951 {
952 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
953 cbToTryRead = UINT32_MAX;
954 }
955 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
956 Assert(GCPtrPC <= UINT32_MAX);
957 }
958
959#ifdef VBOX_WITH_RAW_MODE_NOT_R0
960 /* Allow interpretation of patch manager code blocks since they can for
961 instance throw #PFs for perfectly good reasons. */
962 if (pIemCpu->fInPatchCode)
963 {
964 size_t cbRead = 0;
965 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
966 AssertRCReturn(rc, rc);
967 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
968 return VINF_SUCCESS;
969 }
970#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
971
972 RTGCPHYS GCPhys;
973 uint64_t fFlags;
974 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
975 if (RT_FAILURE(rc))
976 {
977 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
978 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
979 }
980 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
981 {
982 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
983 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
984 }
985 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
986 {
987 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
988 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
989 }
990 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
991 /** @todo Check reserved bits and such stuff. PGM is better at doing
992 * that, so do it when implementing the guest virtual address
993 * TLB... */
994
995#ifdef IEM_VERIFICATION_MODE_FULL
996 /*
997 * Optimistic optimization: Use unconsumed opcode bytes from the previous
998 * instruction.
999 */
1000 /** @todo optimize this differently by not using PGMPhysRead. */
1001 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1002 pIemCpu->GCPhysOpcodes = GCPhys;
1003 if ( offPrevOpcodes < cbOldOpcodes
1004 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1005 {
1006 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1007 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1008 pIemCpu->cbOpcode = cbNew;
1009 return VINF_SUCCESS;
1010 }
1011#endif
1012
1013 /*
1014 * Read the bytes at this address.
1015 */
1016 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1017#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1018 size_t cbActual;
1019 if ( PATMIsEnabled(pVM)
1020 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1021 {
1022 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1023 Assert(cbActual > 0);
1024 pIemCpu->cbOpcode = (uint8_t)cbActual;
1025 }
1026 else
1027#endif
1028 {
1029 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1030 if (cbToTryRead > cbLeftOnPage)
1031 cbToTryRead = cbLeftOnPage;
1032 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1033 cbToTryRead = sizeof(pIemCpu->abOpcode);
1034
1035 if (!pIemCpu->fBypassHandlers)
1036 {
1037 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1038 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1039 { /* likely */ }
1040 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1041 {
1042 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1043 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1044 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1045 }
1046 else
1047 {
1048 Log((RT_SUCCESS(rcStrict)
1049 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1050 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1051 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1052 return rcStrict;
1053 }
1054 }
1055 else
1056 {
1057 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1058 if (RT_SUCCESS(rc))
1059 { /* likely */ }
1060 else
1061 {
1062 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1063 GCPtrPC, GCPhys, cbToTryRead, rc));
1064 return rc;
1065 }
1066 }
1067 pIemCpu->cbOpcode = cbToTryRead;
1068 }
1069
1070 return VINF_SUCCESS;
1071}
1072
1073
1074/**
1075 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1076 * exception if it fails.
1077 *
1078 * @returns Strict VBox status code.
1079 * @param pIemCpu The IEM state.
1080 * @param cbMin The minimum number of bytes relative to offOpcode
1081 * that must be read.
1082 */
1083IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1084{
1085 /*
1086 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1087 *
1088 * First translate CS:rIP to a physical address.
1089 */
1090 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1091 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1092 uint32_t cbToTryRead;
1093 RTGCPTR GCPtrNext;
1094 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1095 {
1096 cbToTryRead = PAGE_SIZE;
1097 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1098 if (!IEM_IS_CANONICAL(GCPtrNext))
1099 return iemRaiseGeneralProtectionFault0(pIemCpu);
1100 }
1101 else
1102 {
1103 uint32_t GCPtrNext32 = pCtx->eip;
1104 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1105 GCPtrNext32 += pIemCpu->cbOpcode;
1106 if (GCPtrNext32 > pCtx->cs.u32Limit)
1107 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1108 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1109 if (!cbToTryRead) /* overflowed */
1110 {
1111 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1112 cbToTryRead = UINT32_MAX;
1113 /** @todo check out wrapping around the code segment. */
1114 }
1115 if (cbToTryRead < cbMin - cbLeft)
1116 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1117 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1118 }
1119
1120 /* Only read up to the end of the page, and make sure we don't read more
1121 than the opcode buffer can hold. */
1122 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1123 if (cbToTryRead > cbLeftOnPage)
1124 cbToTryRead = cbLeftOnPage;
1125 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1126 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1127/** @todo r=bird: Convert assertion into undefined opcode exception? */
1128 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1129
1130#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1131 /* Allow interpretation of patch manager code blocks since they can for
1132 instance throw #PFs for perfectly good reasons. */
1133 if (pIemCpu->fInPatchCode)
1134 {
1135 size_t cbRead = 0;
1136 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1137 AssertRCReturn(rc, rc);
1138 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1139 return VINF_SUCCESS;
1140 }
1141#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1142
1143 RTGCPHYS GCPhys;
1144 uint64_t fFlags;
1145 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1146 if (RT_FAILURE(rc))
1147 {
1148 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1149 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1150 }
1151 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1152 {
1153 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1154 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1155 }
1156 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1157 {
1158 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1159 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1160 }
1161 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1162 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1163 /** @todo Check reserved bits and such stuff. PGM is better at doing
1164 * that, so do it when implementing the guest virtual address
1165 * TLB... */
1166
1167 /*
1168 * Read the bytes at this address.
1169 *
1170 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1171 * and since PATM should only patch the start of an instruction there
1172 * should be no need to check again here.
1173 */
1174 if (!pIemCpu->fBypassHandlers)
1175 {
1176 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1177 cbToTryRead, PGMACCESSORIGIN_IEM);
1178 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1179 { /* likely */ }
1180 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1181 {
1182 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1183 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1184 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1185 }
1186 else
1187 {
1188 Log((RT_SUCCESS(rcStrict)
1189 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1190 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1191 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1192 return rcStrict;
1193 }
1194 }
1195 else
1196 {
1197 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1198 if (RT_SUCCESS(rc))
1199 { /* likely */ }
1200 else
1201 {
1202 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1203 return rc;
1204 }
1205 }
1206 pIemCpu->cbOpcode += cbToTryRead;
1207 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1208
1209 return VINF_SUCCESS;
1210}
1211
1212
1213/**
1214 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1215 *
1216 * @returns Strict VBox status code.
1217 * @param pIemCpu The IEM state.
1218 * @param pb Where to return the opcode byte.
1219 */
1220DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1221{
1222 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1223 if (rcStrict == VINF_SUCCESS)
1224 {
1225 uint8_t offOpcode = pIemCpu->offOpcode;
1226 *pb = pIemCpu->abOpcode[offOpcode];
1227 pIemCpu->offOpcode = offOpcode + 1;
1228 }
1229 else
1230 *pb = 0;
1231 return rcStrict;
1232}
1233
1234
1235/**
1236 * Fetches the next opcode byte.
1237 *
1238 * @returns Strict VBox status code.
1239 * @param pIemCpu The IEM state.
1240 * @param pu8 Where to return the opcode byte.
1241 */
1242DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1243{
1244 uint8_t const offOpcode = pIemCpu->offOpcode;
1245 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1246 {
1247 *pu8 = pIemCpu->abOpcode[offOpcode];
1248 pIemCpu->offOpcode = offOpcode + 1;
1249 return VINF_SUCCESS;
1250 }
1251 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1252}
1253
1254
1255/**
1256 * Fetches the next opcode byte, returns automatically on failure.
1257 *
1258 * @param a_pu8 Where to return the opcode byte.
1259 * @remark Implicitly references pIemCpu.
1260 */
1261#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1262 do \
1263 { \
1264 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1265 if (rcStrict2 != VINF_SUCCESS) \
1266 return rcStrict2; \
1267 } while (0)
1268
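/*
 * Illustrative sketch (hypothetical fragment, for exposition only): inside a decoder
 * function the macro form is preferred over calling iemOpcodeGetNextU8 directly,
 * because it returns from the enclosing function automatically when the fetch fails.
 * @code
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);   // bails out of the decoder on failure
 *      // ... decode the ModR/M byte in bRm ...
 * @endcode
 */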
1269
1270/**
1271 * Fetches the next signed byte from the opcode stream.
1272 *
1273 * @returns Strict VBox status code.
1274 * @param pIemCpu The IEM state.
1275 * @param pi8 Where to return the signed byte.
1276 */
1277DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1278{
1279 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1280}
1281
1282
1283/**
1284 * Fetches the next signed byte from the opcode stream, returning automatically
1285 * on failure.
1286 *
1287 * @param a_pi8 Where to return the signed byte.
1288 * @remark Implicitly references pIemCpu.
1289 */
1290#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1291 do \
1292 { \
1293 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1294 if (rcStrict2 != VINF_SUCCESS) \
1295 return rcStrict2; \
1296 } while (0)
1297
1298
1299/**
1300 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1301 *
1302 * @returns Strict VBox status code.
1303 * @param pIemCpu The IEM state.
1304 * @param pu16 Where to return the opcode word.
1305 */
1306DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1307{
1308 uint8_t u8;
1309 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1310 if (rcStrict == VINF_SUCCESS)
1311 *pu16 = (int8_t)u8;
1312 return rcStrict;
1313}
1314
1315
1316/**
1317 * Fetches the next signed byte from the opcode stream, extending it to
1318 * unsigned 16-bit.
1319 *
1320 * @returns Strict VBox status code.
1321 * @param pIemCpu The IEM state.
1322 * @param pu16 Where to return the unsigned word.
1323 */
1324DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1325{
1326 uint8_t const offOpcode = pIemCpu->offOpcode;
1327 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1328 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1329
1330 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1331 pIemCpu->offOpcode = offOpcode + 1;
1332 return VINF_SUCCESS;
1333}
1334
1335
1336/**
1337 * Fetches the next signed byte from the opcode stream and sign-extends it to
1338 * a word, returning automatically on failure.
1339 *
1340 * @param a_pu16 Where to return the word.
1341 * @remark Implicitly references pIemCpu.
1342 */
1343#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1344 do \
1345 { \
1346 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1347 if (rcStrict2 != VINF_SUCCESS) \
1348 return rcStrict2; \
1349 } while (0)
1350
1351
1352/**
1353 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1354 *
1355 * @returns Strict VBox status code.
1356 * @param pIemCpu The IEM state.
1357 * @param pu32 Where to return the opcode dword.
1358 */
1359DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1360{
1361 uint8_t u8;
1362 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1363 if (rcStrict == VINF_SUCCESS)
1364 *pu32 = (int8_t)u8;
1365 return rcStrict;
1366}
1367
1368
1369/**
1370 * Fetches the next signed byte from the opcode stream, extending it to
1371 * unsigned 32-bit.
1372 *
1373 * @returns Strict VBox status code.
1374 * @param pIemCpu The IEM state.
1375 * @param pu32 Where to return the unsigned dword.
1376 */
1377DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1378{
1379 uint8_t const offOpcode = pIemCpu->offOpcode;
1380 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1381 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1382
1383 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1384 pIemCpu->offOpcode = offOpcode + 1;
1385 return VINF_SUCCESS;
1386}
1387
1388
1389/**
1390 * Fetches the next signed byte from the opcode stream and sign-extends it to
1391 * a double word, returning automatically on failure.
1392 *
1393 * @param a_pu32 Where to return the double word.
1394 * @remark Implicitly references pIemCpu.
1395 */
1396#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1397 do \
1398 { \
1399 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1400 if (rcStrict2 != VINF_SUCCESS) \
1401 return rcStrict2; \
1402 } while (0)
1403
1404
1405/**
1406 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1407 *
1408 * @returns Strict VBox status code.
1409 * @param pIemCpu The IEM state.
1410 * @param pu64 Where to return the opcode qword.
1411 */
1412DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1413{
1414 uint8_t u8;
1415 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1416 if (rcStrict == VINF_SUCCESS)
1417 *pu64 = (int8_t)u8;
1418 return rcStrict;
1419}
1420
1421
1422/**
1423 * Fetches the next signed byte from the opcode stream, extending it to
1424 * unsigned 64-bit.
1425 *
1426 * @returns Strict VBox status code.
1427 * @param pIemCpu The IEM state.
1428 * @param pu64 Where to return the unsigned qword.
1429 */
1430DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1431{
1432 uint8_t const offOpcode = pIemCpu->offOpcode;
1433 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1434 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1435
1436 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1437 pIemCpu->offOpcode = offOpcode + 1;
1438 return VINF_SUCCESS;
1439}
1440
1441
1442/**
1443 * Fetches the next signed byte from the opcode stream and sign-extends it to
1444 * a quad word, returning automatically on failure.
1445 *
1446 * @param a_pu64 Where to return the quad word.
1447 * @remark Implicitly references pIemCpu.
1448 */
1449#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1450 do \
1451 { \
1452 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1453 if (rcStrict2 != VINF_SUCCESS) \
1454 return rcStrict2; \
1455 } while (0)
1456
1457
1458/**
1459 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1460 *
1461 * @returns Strict VBox status code.
1462 * @param pIemCpu The IEM state.
1463 * @param pu16 Where to return the opcode word.
1464 */
1465DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1466{
1467 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1468 if (rcStrict == VINF_SUCCESS)
1469 {
1470 uint8_t offOpcode = pIemCpu->offOpcode;
1471 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1472 pIemCpu->offOpcode = offOpcode + 2;
1473 }
1474 else
1475 *pu16 = 0;
1476 return rcStrict;
1477}
1478
1479
1480/**
1481 * Fetches the next opcode word.
1482 *
1483 * @returns Strict VBox status code.
1484 * @param pIemCpu The IEM state.
1485 * @param pu16 Where to return the opcode word.
1486 */
1487DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1488{
1489 uint8_t const offOpcode = pIemCpu->offOpcode;
1490 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1491 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1492
1493 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1494 pIemCpu->offOpcode = offOpcode + 2;
1495 return VINF_SUCCESS;
1496}
1497
1498
1499/**
1500 * Fetches the next opcode word, returns automatically on failure.
1501 *
1502 * @param a_pu16 Where to return the opcode word.
1503 * @remark Implicitly references pIemCpu.
1504 */
1505#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1506 do \
1507 { \
1508 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1509 if (rcStrict2 != VINF_SUCCESS) \
1510 return rcStrict2; \
1511 } while (0)
1512
1513
1514/**
1515 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1516 *
1517 * @returns Strict VBox status code.
1518 * @param pIemCpu The IEM state.
1519 * @param pu32 Where to return the opcode double word.
1520 */
1521DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1522{
1523 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1524 if (rcStrict == VINF_SUCCESS)
1525 {
1526 uint8_t offOpcode = pIemCpu->offOpcode;
1527 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1528 pIemCpu->offOpcode = offOpcode + 2;
1529 }
1530 else
1531 *pu32 = 0;
1532 return rcStrict;
1533}
1534
1535
1536/**
1537 * Fetches the next opcode word, zero extending it to a double word.
1538 *
1539 * @returns Strict VBox status code.
1540 * @param pIemCpu The IEM state.
1541 * @param pu32 Where to return the opcode double word.
1542 */
1543DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1544{
1545 uint8_t const offOpcode = pIemCpu->offOpcode;
1546 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1547 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1548
1549 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1550 pIemCpu->offOpcode = offOpcode + 2;
1551 return VINF_SUCCESS;
1552}
1553
1554
1555/**
1556 * Fetches the next opcode word and zero extends it to a double word, returns
1557 * automatically on failure.
1558 *
1559 * @param a_pu32 Where to return the opcode double word.
1560 * @remark Implicitly references pIemCpu.
1561 */
1562#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1563 do \
1564 { \
1565 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1566 if (rcStrict2 != VINF_SUCCESS) \
1567 return rcStrict2; \
1568 } while (0)
1569
1570
1571/**
1572 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1573 *
1574 * @returns Strict VBox status code.
1575 * @param pIemCpu The IEM state.
1576 * @param pu64 Where to return the opcode quad word.
1577 */
1578DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1579{
1580 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1581 if (rcStrict == VINF_SUCCESS)
1582 {
1583 uint8_t offOpcode = pIemCpu->offOpcode;
1584 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1585 pIemCpu->offOpcode = offOpcode + 2;
1586 }
1587 else
1588 *pu64 = 0;
1589 return rcStrict;
1590}
1591
1592
1593/**
1594 * Fetches the next opcode word, zero extending it to a quad word.
1595 *
1596 * @returns Strict VBox status code.
1597 * @param pIemCpu The IEM state.
1598 * @param pu64 Where to return the opcode quad word.
1599 */
1600DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1601{
1602 uint8_t const offOpcode = pIemCpu->offOpcode;
1603 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1604 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1605
1606 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1607 pIemCpu->offOpcode = offOpcode + 2;
1608 return VINF_SUCCESS;
1609}
1610
1611
1612/**
1613 * Fetches the next opcode word and zero extends it to a quad word, returns
1614 * automatically on failure.
1615 *
1616 * @param a_pu64 Where to return the opcode quad word.
1617 * @remark Implicitly references pIemCpu.
1618 */
1619#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1620 do \
1621 { \
1622 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1623 if (rcStrict2 != VINF_SUCCESS) \
1624 return rcStrict2; \
1625 } while (0)
1626
1627
1628/**
1629 * Fetches the next signed word from the opcode stream.
1630 *
1631 * @returns Strict VBox status code.
1632 * @param pIemCpu The IEM state.
1633 * @param pi16 Where to return the signed word.
1634 */
1635DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1636{
1637 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1638}
1639
1640
1641/**
1642 * Fetches the next signed word from the opcode stream, returning automatically
1643 * on failure.
1644 *
1645 * @param a_pi16 Where to return the signed word.
1646 * @remark Implicitly references pIemCpu.
1647 */
1648#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1649 do \
1650 { \
1651 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1652 if (rcStrict2 != VINF_SUCCESS) \
1653 return rcStrict2; \
1654 } while (0)
1655
1656
1657/**
1658 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1659 *
1660 * @returns Strict VBox status code.
1661 * @param pIemCpu The IEM state.
1662 * @param pu32 Where to return the opcode dword.
1663 */
1664DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1665{
1666 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1667 if (rcStrict == VINF_SUCCESS)
1668 {
1669 uint8_t offOpcode = pIemCpu->offOpcode;
1670 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1671 pIemCpu->abOpcode[offOpcode + 1],
1672 pIemCpu->abOpcode[offOpcode + 2],
1673 pIemCpu->abOpcode[offOpcode + 3]);
1674 pIemCpu->offOpcode = offOpcode + 4;
1675 }
1676 else
1677 *pu32 = 0;
1678 return rcStrict;
1679}
1680
1681
1682/**
1683 * Fetches the next opcode dword.
1684 *
1685 * @returns Strict VBox status code.
1686 * @param pIemCpu The IEM state.
1687 * @param pu32 Where to return the opcode double word.
1688 */
1689DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1690{
1691 uint8_t const offOpcode = pIemCpu->offOpcode;
1692 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1693 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1694
1695 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1696 pIemCpu->abOpcode[offOpcode + 1],
1697 pIemCpu->abOpcode[offOpcode + 2],
1698 pIemCpu->abOpcode[offOpcode + 3]);
1699 pIemCpu->offOpcode = offOpcode + 4;
1700 return VINF_SUCCESS;
1701}
1702
1703
1704/**
1705 * Fetches the next opcode dword, returns automatically on failure.
1706 *
1707 * @param a_pu32 Where to return the opcode dword.
1708 * @remark Implicitly references pIemCpu.
1709 */
1710#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1711 do \
1712 { \
1713 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1714 if (rcStrict2 != VINF_SUCCESS) \
1715 return rcStrict2; \
1716 } while (0)
1717
1718
1719/**
1720 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1721 *
1722 * @returns Strict VBox status code.
1723 * @param pIemCpu The IEM state.
1724 * @param pu64 Where to return the opcode quad word.
1725 */
1726DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1727{
1728 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1729 if (rcStrict == VINF_SUCCESS)
1730 {
1731 uint8_t offOpcode = pIemCpu->offOpcode;
1732 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1733 pIemCpu->abOpcode[offOpcode + 1],
1734 pIemCpu->abOpcode[offOpcode + 2],
1735 pIemCpu->abOpcode[offOpcode + 3]);
1736 pIemCpu->offOpcode = offOpcode + 4;
1737 }
1738 else
1739 *pu64 = 0;
1740 return rcStrict;
1741}
1742
1743
1744/**
1745 * Fetches the next opcode dword, zero extending it to a quad word.
1746 *
1747 * @returns Strict VBox status code.
1748 * @param pIemCpu The IEM state.
1749 * @param pu64 Where to return the opcode quad word.
1750 */
1751DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1752{
1753 uint8_t const offOpcode = pIemCpu->offOpcode;
1754 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1755 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1756
1757 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1758 pIemCpu->abOpcode[offOpcode + 1],
1759 pIemCpu->abOpcode[offOpcode + 2],
1760 pIemCpu->abOpcode[offOpcode + 3]);
1761 pIemCpu->offOpcode = offOpcode + 4;
1762 return VINF_SUCCESS;
1763}
1764
1765
1766/**
1767 * Fetches the next opcode dword and zero extends it to a quad word, returns
1768 * automatically on failure.
1769 *
1770 * @param a_pu64 Where to return the opcode quad word.
1771 * @remark Implicitly references pIemCpu.
1772 */
1773#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1774 do \
1775 { \
1776 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1777 if (rcStrict2 != VINF_SUCCESS) \
1778 return rcStrict2; \
1779 } while (0)
1780
1781
1782/**
1783 * Fetches the next signed double word from the opcode stream.
1784 *
1785 * @returns Strict VBox status code.
1786 * @param pIemCpu The IEM state.
1787 * @param pi32 Where to return the signed double word.
1788 */
1789DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1790{
1791 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1792}
1793
1794/**
1795 * Fetches the next signed double word from the opcode stream, returning
1796 * automatically on failure.
1797 *
1798 * @param a_pi32 Where to return the signed double word.
1799 * @remark Implicitly references pIemCpu.
1800 */
1801#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1802 do \
1803 { \
1804 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1805 if (rcStrict2 != VINF_SUCCESS) \
1806 return rcStrict2; \
1807 } while (0)
1808
1809
1810/**
1811 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1812 *
1813 * @returns Strict VBox status code.
1814 * @param pIemCpu The IEM state.
1815 * @param pu64 Where to return the opcode qword.
1816 */
1817DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1818{
1819 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1820 if (rcStrict == VINF_SUCCESS)
1821 {
1822 uint8_t offOpcode = pIemCpu->offOpcode;
1823 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1824 pIemCpu->abOpcode[offOpcode + 1],
1825 pIemCpu->abOpcode[offOpcode + 2],
1826 pIemCpu->abOpcode[offOpcode + 3]);
1827 pIemCpu->offOpcode = offOpcode + 4;
1828 }
1829 else
1830 *pu64 = 0;
1831 return rcStrict;
1832}
1833
1834
1835/**
1836 * Fetches the next opcode dword, sign extending it into a quad word.
1837 *
1838 * @returns Strict VBox status code.
1839 * @param pIemCpu The IEM state.
1840 * @param pu64 Where to return the opcode quad word.
1841 */
1842DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1843{
1844 uint8_t const offOpcode = pIemCpu->offOpcode;
1845 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1846 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1847
1848 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1849 pIemCpu->abOpcode[offOpcode + 1],
1850 pIemCpu->abOpcode[offOpcode + 2],
1851 pIemCpu->abOpcode[offOpcode + 3]);
1852 *pu64 = i32;
1853 pIemCpu->offOpcode = offOpcode + 4;
1854 return VINF_SUCCESS;
1855}
1856
1857
1858/**
1859 * Fetches the next opcode double word and sign extends it to a quad word,
1860 * returns automatically on failure.
1861 *
1862 * @param a_pu64 Where to return the opcode quad word.
1863 * @remark Implicitly references pIemCpu.
1864 */
1865#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1866 do \
1867 { \
1868 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1869 if (rcStrict2 != VINF_SUCCESS) \
1870 return rcStrict2; \
1871 } while (0)
1872
1873
1874/**
1875 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1876 *
1877 * @returns Strict VBox status code.
1878 * @param pIemCpu The IEM state.
1879 * @param pu64 Where to return the opcode qword.
1880 */
1881DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1882{
1883 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1884 if (rcStrict == VINF_SUCCESS)
1885 {
1886 uint8_t offOpcode = pIemCpu->offOpcode;
1887 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1888 pIemCpu->abOpcode[offOpcode + 1],
1889 pIemCpu->abOpcode[offOpcode + 2],
1890 pIemCpu->abOpcode[offOpcode + 3],
1891 pIemCpu->abOpcode[offOpcode + 4],
1892 pIemCpu->abOpcode[offOpcode + 5],
1893 pIemCpu->abOpcode[offOpcode + 6],
1894 pIemCpu->abOpcode[offOpcode + 7]);
1895 pIemCpu->offOpcode = offOpcode + 8;
1896 }
1897 else
1898 *pu64 = 0;
1899 return rcStrict;
1900}
1901
1902
1903/**
1904 * Fetches the next opcode qword.
1905 *
1906 * @returns Strict VBox status code.
1907 * @param pIemCpu The IEM state.
1908 * @param pu64 Where to return the opcode qword.
1909 */
1910DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1911{
1912 uint8_t const offOpcode = pIemCpu->offOpcode;
1913 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1914 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1915
1916 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1917 pIemCpu->abOpcode[offOpcode + 1],
1918 pIemCpu->abOpcode[offOpcode + 2],
1919 pIemCpu->abOpcode[offOpcode + 3],
1920 pIemCpu->abOpcode[offOpcode + 4],
1921 pIemCpu->abOpcode[offOpcode + 5],
1922 pIemCpu->abOpcode[offOpcode + 6],
1923 pIemCpu->abOpcode[offOpcode + 7]);
1924 pIemCpu->offOpcode = offOpcode + 8;
1925 return VINF_SUCCESS;
1926}
1927
1928
1929/**
1930 * Fetches the next opcode quad word, returns automatically on failure.
1931 *
1932 * @param a_pu64 Where to return the opcode quad word.
1933 * @remark Implicitly references pIemCpu.
1934 */
1935#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1936 do \
1937 { \
1938 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1939 if (rcStrict2 != VINF_SUCCESS) \
1940 return rcStrict2; \
1941 } while (0)
1942
1943
1944/** @name Misc Worker Functions.
1945 * @{
1946 */
1947
1948
1949/**
1950 * Validates a new SS segment.
1951 *
1952 * @returns VBox strict status code.
1953 * @param pIemCpu The IEM per CPU instance data.
1954 * @param pCtx The CPU context.
1955 * @param NewSS The new SS selector.
1956 * @param uCpl The CPL to load the stack for.
1957 * @param pDesc Where to return the descriptor.
1958 */
1959IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1960{
1961 NOREF(pCtx);
1962
1963 /* Null selectors are not allowed (we're not called for dispatching
1964 interrupts with SS=0 in long mode). */
1965 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1966 {
1967 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1968 return iemRaiseTaskSwitchFault0(pIemCpu);
1969 }
1970
1971 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1972 if ((NewSS & X86_SEL_RPL) != uCpl)
1973 {
1974 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1975 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1976 }
1977
1978 /*
1979 * Read the descriptor.
1980 */
1981 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1982 if (rcStrict != VINF_SUCCESS)
1983 return rcStrict;
1984
1985 /*
1986 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1987 */
1988 if (!pDesc->Legacy.Gen.u1DescType)
1989 {
1990 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1991 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1992 }
1993
1994 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1995 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1996 {
1997 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1998 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1999 }
2000 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2001 {
2002 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2003 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2004 }
2005
2006 /* Is it there? */
2007 /** @todo testcase: Is this checked before the canonical / limit check below? */
2008 if (!pDesc->Legacy.Gen.u1Present)
2009 {
2010 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2011 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2012 }
2013
2014 return VINF_SUCCESS;
2015}
2016
2017
2018/**
2019 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2020 * not.
2021 *
2022 * @param a_pIemCpu The IEM per CPU data.
2023 * @param a_pCtx The CPU context.
2024 */
2025#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2026# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2027 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2028 ? (a_pCtx)->eflags.u \
2029 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2030#else
2031# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2032 ( (a_pCtx)->eflags.u )
2033#endif
2034
2035/**
2036 * Updates the EFLAGS in the correct manner wrt. PATM.
2037 *
2038 * @param a_pIemCpu The IEM per CPU data.
2039 * @param a_pCtx The CPU context.
2040 */
2041#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2042# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2043 do { \
2044 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2045 (a_pCtx)->eflags.u = (a_fEfl); \
2046 else \
2047 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2048 } while (0)
2049#else
2050# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2051 do { \
2052 (a_pCtx)->eflags.u = (a_fEfl); \
2053 } while (0)
2054#endif
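/*
 * Usage sketch (illustrative, mirroring what the exception delivery code below does):
 * EFLAGS modifications should go through these two macros as a read-modify-write pair
 * so that raw-mode/PATM sees both the read and the write:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */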
2055
2056
2057/** @} */
2058
2059/** @name Raising Exceptions.
2060 *
2061 * @{
2062 */
2063
2064/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2065 * @{ */
2066/** CPU exception. */
2067#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2068/** External interrupt (from PIC, APIC, whatever). */
2069#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2070/** Software interrupt (int or into, not bound).
2071 * Returns to the following instruction. */
2072#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2073/** Takes an error code. */
2074#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2075/** Takes a CR2. */
2076#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2077/** Generated by the breakpoint instruction. */
2078#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2079/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2080#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2081/** @} */
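/*
 * Illustrative combinations (not a normative list): a software INT n would be raised
 * with IEM_XCPT_FLAGS_T_SOFT_INT, while a page fault (#PF) would combine
 * IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2, since it both
 * pushes an error code and reports the faulting address in CR2.
 */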
2082
2083
2084/**
2085 * Loads the specified stack far pointer from the TSS.
2086 *
2087 * @returns VBox strict status code.
2088 * @param pIemCpu The IEM per CPU instance data.
2089 * @param pCtx The CPU context.
2090 * @param uCpl The CPL to load the stack for.
2091 * @param pSelSS Where to return the new stack segment.
2092 * @param puEsp Where to return the new stack pointer.
2093 */
2094IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2095 PRTSEL pSelSS, uint32_t *puEsp)
2096{
2097 VBOXSTRICTRC rcStrict;
2098 Assert(uCpl < 4);
2099 *puEsp = 0; /* make gcc happy */
2100 *pSelSS = 0; /* make gcc happy */
2101
2102 switch (pCtx->tr.Attr.n.u4Type)
2103 {
2104 /*
2105 * 16-bit TSS (X86TSS16).
2106 */
2107 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2108 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2109 {
2110 uint32_t off = uCpl * 4 + 2;
2111 if (off + 4 > pCtx->tr.u32Limit)
2112 {
2113 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2114 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2115 }
2116
2117 uint32_t u32Tmp = 0; /* gcc maybe... */
2118 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2119 if (rcStrict == VINF_SUCCESS)
2120 {
2121 *puEsp = RT_LOWORD(u32Tmp);
2122 *pSelSS = RT_HIWORD(u32Tmp);
2123 return VINF_SUCCESS;
2124 }
2125 break;
2126 }
2127
2128 /*
2129 * 32-bit TSS (X86TSS32).
2130 */
2131 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2132 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2133 {
2134 uint32_t off = uCpl * 8 + 4;
2135 if (off + 7 > pCtx->tr.u32Limit)
2136 {
2137 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2138 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2139 }
2140
2141 uint64_t u64Tmp;
2142 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2143 if (rcStrict == VINF_SUCCESS)
2144 {
2145 *puEsp = u64Tmp & UINT32_MAX;
2146 *pSelSS = (RTSEL)(u64Tmp >> 32);
2147 return VINF_SUCCESS;
2148 }
2149 break;
2150 }
2151
2152 default:
2153 AssertFailedReturn(VERR_IEM_IPE_4);
2154 }
2155 return rcStrict;
2156}
2157
2158
2159/**
2160 * Loads the specified stack pointer from the 64-bit TSS.
2161 *
2162 * @returns VBox strict status code.
2163 * @param pIemCpu The IEM per CPU instance data.
2164 * @param pCtx The CPU context.
2165 * @param uCpl The CPL to load the stack for.
2166 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2167 * @param puRsp Where to return the new stack pointer.
2168 */
2169IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2170{
2171 Assert(uCpl < 4);
2172 Assert(uIst < 8);
2173 *puRsp = 0; /* make gcc happy */
2174
2175 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2176
2177 uint32_t off;
2178 if (uIst)
2179 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2180 else
2181 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2182 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2183 {
2184 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2185 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2186 }
2187
2188 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2189}
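/*
 * Worked example for the offset calculation above (illustrative only): with uIst=0
 * and uCpl=2 the qword is read from RT_OFFSETOF(X86TSS64, rsp0) + 2*8, i.e. the RSP2
 * field; with uIst=3 it is read from RT_OFFSETOF(X86TSS64, ist1) + 2*8, i.e. IST3.
 */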
2190
2191
2192/**
2193 * Adjust the CPU state according to the exception being raised.
2194 *
2195 * @param pCtx The CPU context.
2196 * @param u8Vector The exception that has been raised.
2197 */
2198DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2199{
2200 switch (u8Vector)
2201 {
2202 case X86_XCPT_DB:
2203 pCtx->dr[7] &= ~X86_DR7_GD;
2204 break;
2205 /** @todo Read the AMD and Intel exception reference... */
2206 }
2207}
2208
2209
2210/**
2211 * Implements exceptions and interrupts for real mode.
2212 *
2213 * @returns VBox strict status code.
2214 * @param pIemCpu The IEM per CPU instance data.
2215 * @param pCtx The CPU context.
2216 * @param cbInstr The number of bytes to offset rIP by in the return
2217 * address.
2218 * @param u8Vector The interrupt / exception vector number.
2219 * @param fFlags The flags.
2220 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2221 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2222 */
2223IEM_STATIC VBOXSTRICTRC
2224iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2225 PCPUMCTX pCtx,
2226 uint8_t cbInstr,
2227 uint8_t u8Vector,
2228 uint32_t fFlags,
2229 uint16_t uErr,
2230 uint64_t uCr2)
2231{
2232 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2233 NOREF(uErr); NOREF(uCr2);
2234
2235 /*
2236 * Read the IDT entry.
2237 */
2238 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2239 {
2240 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2241 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2242 }
2243 RTFAR16 Idte;
2244 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2245 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2246 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2247 return rcStrict;
2248
2249 /*
2250 * Push the stack frame.
2251 */
2252 uint16_t *pu16Frame;
2253 uint64_t uNewRsp;
2254 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2255 if (rcStrict != VINF_SUCCESS)
2256 return rcStrict;
2257
2258 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2259 pu16Frame[2] = (uint16_t)fEfl;
2260 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2261 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2262 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2263 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2264 return rcStrict;
2265
2266 /*
2267 * Load the vector address into cs:ip and make exception specific state
2268 * adjustments.
2269 */
2270 pCtx->cs.Sel = Idte.sel;
2271 pCtx->cs.ValidSel = Idte.sel;
2272 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2273 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2274 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2275 pCtx->rip = Idte.off;
2276 fEfl &= ~X86_EFL_IF;
2277 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2278
2279 /** @todo do we actually do this in real mode? */
2280 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2281 iemRaiseXcptAdjustState(pCtx, u8Vector);
2282
2283 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2284}
2285
2286
2287/**
2288 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2289 *
2290 * @param pIemCpu The IEM per CPU instance data.
2291 * @param pSReg Pointer to the segment register.
2292 */
2293IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2294{
2295 pSReg->Sel = 0;
2296 pSReg->ValidSel = 0;
2297 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2298 {
2299 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2300 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2301 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2302 }
2303 else
2304 {
2305 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2306 /** @todo check this on AMD-V */
2307 pSReg->u64Base = 0;
2308 pSReg->u32Limit = 0;
2309 }
2310}
2311
2312
2313/**
2314 * Loads a segment selector during a task switch in V8086 mode.
2315 *
2316 * @param pIemCpu The IEM per CPU instance data.
2317 * @param pSReg Pointer to the segment register.
2318 * @param uSel The selector value to load.
2319 */
2320IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2321{
2322 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2323 pSReg->Sel = uSel;
2324 pSReg->ValidSel = uSel;
2325 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2326 pSReg->u64Base = uSel << 4;
2327 pSReg->u32Limit = 0xffff;
2328 pSReg->Attr.u = 0xf3; /* Present, DPL=3, non-system, read/write accessed data - the fixed V86-mode segment attributes. */
2329}
2330
2331
2332/**
2333 * Loads a NULL data selector into a selector register, both the hidden and
2334 * visible parts, in protected mode.
2335 *
2336 * @param pIemCpu The IEM state of the calling EMT.
2337 * @param pSReg Pointer to the segment register.
2338 * @param uRpl The RPL.
2339 */
2340IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2341{
2342 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2343 * data selector in protected mode. */
2344 pSReg->Sel = uRpl;
2345 pSReg->ValidSel = uRpl;
2346 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2347 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2348 {
2349 /* VT-x (Intel 3960x) observed doing something like this. */
2350 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2351 pSReg->u32Limit = UINT32_MAX;
2352 pSReg->u64Base = 0;
2353 }
2354 else
2355 {
2356 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2357 pSReg->u32Limit = 0;
2358 pSReg->u64Base = 0;
2359 }
2360}
2361
2362
2363/**
2364 * Loads a segment selector during a task switch in protected mode. In this task
2365 * switch scenario, we would throw #TS exceptions rather than #GPs.
2366 *
2367 * @returns VBox strict status code.
2368 * @param pIemCpu The IEM per CPU instance data.
2369 * @param pSReg Pointer to the segment register.
2370 * @param uSel The new selector value.
2371 *
2372 * @remarks This does -NOT- handle CS or SS.
2373 * @remarks This expects pIemCpu->uCpl to be up to date.
2374 */
2375IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2376{
2377 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2378
2379 /* Null data selector. */
2380 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2381 {
2382 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2383 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2384 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2385 return VINF_SUCCESS;
2386 }
2387
2388 /* Fetch the descriptor. */
2389 IEMSELDESC Desc;
2390 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2391 if (rcStrict != VINF_SUCCESS)
2392 {
2393 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2394 VBOXSTRICTRC_VAL(rcStrict)));
2395 return rcStrict;
2396 }
2397
2398 /* Must be a data segment or readable code segment. */
2399 if ( !Desc.Legacy.Gen.u1DescType
2400 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2401 {
2402 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2403 Desc.Legacy.Gen.u4Type));
2404 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2405 }
2406
2407 /* Check privileges for data segments and non-conforming code segments. */
2408 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2409 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2410 {
2411 /* The RPL and the new CPL must be less than or equal to the DPL. */
2412 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2413 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2414 {
2415 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2416 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2417 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2418 }
2419 }
2420
2421 /* Is it there? */
2422 if (!Desc.Legacy.Gen.u1Present)
2423 {
2424 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2425 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2426 }
2427
2428 /* The base and limit. */
2429 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2430 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2431
2432 /*
2433 * Ok, everything checked out fine. Now set the accessed bit before
2434 * committing the result into the registers.
2435 */
2436 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2437 {
2438 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2439 if (rcStrict != VINF_SUCCESS)
2440 return rcStrict;
2441 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2442 }
2443
2444 /* Commit */
2445 pSReg->Sel = uSel;
2446 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2447 pSReg->u32Limit = cbLimit;
2448 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2449 pSReg->ValidSel = uSel;
2450 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2451 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2452 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2453
2454 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2455 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2456 return VINF_SUCCESS;
2457}
2458
2459
2460/**
2461 * Performs a task switch.
2462 *
2463 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2464 * caller is responsible for performing the necessary checks (like DPL, TSS
2465 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2466 * reference for JMP, CALL, IRET.
2467 *
2468 * If the task switch is due to a software interrupt or hardware exception,
2469 * the caller is responsible for validating the TSS selector and descriptor. See
2470 * Intel Instruction reference for INT n.
2471 *
2472 * @returns VBox strict status code.
2473 * @param pIemCpu The IEM per CPU instance data.
2474 * @param pCtx The CPU context.
2475 * @param enmTaskSwitch What caused this task switch.
2476 * @param uNextEip The EIP effective after the task switch.
2477 * @param fFlags The flags.
2478 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2479 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2480 * @param SelTSS The TSS selector of the new task.
2481 * @param pNewDescTSS Pointer to the new TSS descriptor.
2482 */
2483IEM_STATIC VBOXSTRICTRC
2484iemTaskSwitch(PIEMCPU pIemCpu,
2485 PCPUMCTX pCtx,
2486 IEMTASKSWITCH enmTaskSwitch,
2487 uint32_t uNextEip,
2488 uint32_t fFlags,
2489 uint16_t uErr,
2490 uint64_t uCr2,
2491 RTSEL SelTSS,
2492 PIEMSELDESC pNewDescTSS)
2493{
2494 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2495 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2496
2497 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2498 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2499 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2500 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2501 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2502
2503 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2504 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2505
2506 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2507 fIsNewTSS386, pCtx->eip, uNextEip));
2508
2509 /* Update CR2 in case it's a page-fault. */
2510 /** @todo This should probably be done much earlier in IEM/PGM. See
2511 * @bugref{5653#c49}. */
2512 if (fFlags & IEM_XCPT_FLAGS_CR2)
2513 pCtx->cr2 = uCr2;
2514
2515 /*
2516 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2517 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2518 */
2519 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2520 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2521 if (uNewTSSLimit < uNewTSSLimitMin)
2522 {
2523 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2524 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2525 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2526 }
2527
2528 /*
2529 * Check the current TSS limit. The last written byte to the current TSS during the
2530 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2531 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2532 *
2533 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2534 * end up with smaller than "legal" TSS limits.
2535 */
2536 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2537 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2538 if (uCurTSSLimit < uCurTSSLimitMin)
2539 {
2540 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2541 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2542 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2543 }
2544
2545 /*
2546 * Verify that the new TSS can be accessed and map it. Map only the required contents
2547 * and not the entire TSS.
2548 */
2549 void *pvNewTSS;
2550 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2551 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2552 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2553 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2554 * not perform correct translation if this happens. See Intel spec. 7.2.1
2555 * "Task-State Segment" */
2556 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2557 if (rcStrict != VINF_SUCCESS)
2558 {
2559 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2560 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2561 return rcStrict;
2562 }
2563
2564 /*
2565 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2566 */
2567 uint32_t u32EFlags = pCtx->eflags.u32;
2568 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2569 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2570 {
2571 PX86DESC pDescCurTSS;
2572 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2573 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2574 if (rcStrict != VINF_SUCCESS)
2575 {
2576 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2577 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2578 return rcStrict;
2579 }
2580
2581 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2582 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2583 if (rcStrict != VINF_SUCCESS)
2584 {
2585 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2586 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2587 return rcStrict;
2588 }
2589
2590 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2591 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2592 {
2593 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2594 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2595 u32EFlags &= ~X86_EFL_NT;
2596 }
2597 }
2598
2599 /*
2600 * Save the CPU state into the current TSS.
2601 */
2602 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2603 if (GCPtrNewTSS == GCPtrCurTSS)
2604 {
2605 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2606 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2607 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2608 }
2609 if (fIsNewTSS386)
2610 {
2611 /*
2612 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2613 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2614 */
2615 void *pvCurTSS32;
2616 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2617 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2618 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2619 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2620 if (rcStrict != VINF_SUCCESS)
2621 {
2622 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2623 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2624 return rcStrict;
2625 }
2626
2627 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2628 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2629 pCurTSS32->eip = uNextEip;
2630 pCurTSS32->eflags = u32EFlags;
2631 pCurTSS32->eax = pCtx->eax;
2632 pCurTSS32->ecx = pCtx->ecx;
2633 pCurTSS32->edx = pCtx->edx;
2634 pCurTSS32->ebx = pCtx->ebx;
2635 pCurTSS32->esp = pCtx->esp;
2636 pCurTSS32->ebp = pCtx->ebp;
2637 pCurTSS32->esi = pCtx->esi;
2638 pCurTSS32->edi = pCtx->edi;
2639 pCurTSS32->es = pCtx->es.Sel;
2640 pCurTSS32->cs = pCtx->cs.Sel;
2641 pCurTSS32->ss = pCtx->ss.Sel;
2642 pCurTSS32->ds = pCtx->ds.Sel;
2643 pCurTSS32->fs = pCtx->fs.Sel;
2644 pCurTSS32->gs = pCtx->gs.Sel;
2645
2646 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2647 if (rcStrict != VINF_SUCCESS)
2648 {
2649 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2650 VBOXSTRICTRC_VAL(rcStrict)));
2651 return rcStrict;
2652 }
2653 }
2654 else
2655 {
2656 /*
2657 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2658 */
2659 void *pvCurTSS16;
2660 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2661 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2662 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2663 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2664 if (rcStrict != VINF_SUCCESS)
2665 {
2666 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2667 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2668 return rcStrict;
2669 }
2670
2671 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2672 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2673 pCurTSS16->ip = uNextEip;
2674 pCurTSS16->flags = u32EFlags;
2675 pCurTSS16->ax = pCtx->ax;
2676 pCurTSS16->cx = pCtx->cx;
2677 pCurTSS16->dx = pCtx->dx;
2678 pCurTSS16->bx = pCtx->bx;
2679 pCurTSS16->sp = pCtx->sp;
2680 pCurTSS16->bp = pCtx->bp;
2681 pCurTSS16->si = pCtx->si;
2682 pCurTSS16->di = pCtx->di;
2683 pCurTSS16->es = pCtx->es.Sel;
2684 pCurTSS16->cs = pCtx->cs.Sel;
2685 pCurTSS16->ss = pCtx->ss.Sel;
2686 pCurTSS16->ds = pCtx->ds.Sel;
2687
2688 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2689 if (rcStrict != VINF_SUCCESS)
2690 {
2691 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2692 VBOXSTRICTRC_VAL(rcStrict)));
2693 return rcStrict;
2694 }
2695 }
2696
2697 /*
2698 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2699 */
2700 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2701 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2702 {
2703 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2704 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2705 pNewTSS->selPrev = pCtx->tr.Sel;
2706 }
2707
2708 /*
2709 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2710 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2711 */
2712 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2713 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2714 bool fNewDebugTrap;
2715 if (fIsNewTSS386)
2716 {
2717 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2718 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2719 uNewEip = pNewTSS32->eip;
2720 uNewEflags = pNewTSS32->eflags;
2721 uNewEax = pNewTSS32->eax;
2722 uNewEcx = pNewTSS32->ecx;
2723 uNewEdx = pNewTSS32->edx;
2724 uNewEbx = pNewTSS32->ebx;
2725 uNewEsp = pNewTSS32->esp;
2726 uNewEbp = pNewTSS32->ebp;
2727 uNewEsi = pNewTSS32->esi;
2728 uNewEdi = pNewTSS32->edi;
2729 uNewES = pNewTSS32->es;
2730 uNewCS = pNewTSS32->cs;
2731 uNewSS = pNewTSS32->ss;
2732 uNewDS = pNewTSS32->ds;
2733 uNewFS = pNewTSS32->fs;
2734 uNewGS = pNewTSS32->gs;
2735 uNewLdt = pNewTSS32->selLdt;
2736 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2737 }
2738 else
2739 {
2740 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2741 uNewCr3 = 0;
2742 uNewEip = pNewTSS16->ip;
2743 uNewEflags = pNewTSS16->flags;
2744 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2745 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2746 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2747 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2748 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2749 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2750 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2751 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2752 uNewES = pNewTSS16->es;
2753 uNewCS = pNewTSS16->cs;
2754 uNewSS = pNewTSS16->ss;
2755 uNewDS = pNewTSS16->ds;
2756 uNewFS = 0;
2757 uNewGS = 0;
2758 uNewLdt = pNewTSS16->selLdt;
2759 fNewDebugTrap = false;
2760 }
2761
2762 if (GCPtrNewTSS == GCPtrCurTSS)
2763 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2764 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2765
2766 /*
2767 * We're done accessing the new TSS.
2768 */
2769 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2770 if (rcStrict != VINF_SUCCESS)
2771 {
2772 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2773 return rcStrict;
2774 }
2775
2776 /*
2777 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2778 */
2779 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2780 {
2781 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2782 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2783 if (rcStrict != VINF_SUCCESS)
2784 {
2785 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2786 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2787 return rcStrict;
2788 }
2789
2790 /* Check that the descriptor indicates the new TSS is available (not busy). */
2791 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2792 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2793 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2794
2795 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2796 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2797 if (rcStrict != VINF_SUCCESS)
2798 {
2799 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2800 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2801 return rcStrict;
2802 }
2803 }
2804
2805 /*
2806 * From this point on, we're technically in the new task. Exceptions raised during the rest of the
2807 * task switch are delivered in the context of the new task, before any of its instructions execute.
2808 */
2809 pCtx->tr.Sel = SelTSS;
2810 pCtx->tr.ValidSel = SelTSS;
2811 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2812 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2813 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2814 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2815 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2816
2817 /* Set the busy bit in TR. */
2818 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2819 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2820 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2821 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2822 {
2823 uNewEflags |= X86_EFL_NT;
2824 }
2825
2826 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2827 pCtx->cr0 |= X86_CR0_TS;
2828 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2829
2830 pCtx->eip = uNewEip;
2831 pCtx->eax = uNewEax;
2832 pCtx->ecx = uNewEcx;
2833 pCtx->edx = uNewEdx;
2834 pCtx->ebx = uNewEbx;
2835 pCtx->esp = uNewEsp;
2836 pCtx->ebp = uNewEbp;
2837 pCtx->esi = uNewEsi;
2838 pCtx->edi = uNewEdi;
2839
2840 uNewEflags &= X86_EFL_LIVE_MASK;
2841 uNewEflags |= X86_EFL_RA1_MASK;
2842 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2843
2844 /*
2845 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2846 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2847 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2848 */
2849 pCtx->es.Sel = uNewES;
2850 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2851 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2852
2853 pCtx->cs.Sel = uNewCS;
2854 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2855 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2856
2857 pCtx->ss.Sel = uNewSS;
2858 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2859 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2860
2861 pCtx->ds.Sel = uNewDS;
2862 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2863 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2864
2865 pCtx->fs.Sel = uNewFS;
2866 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2867 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2868
2869 pCtx->gs.Sel = uNewGS;
2870 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2871 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2872 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2873
2874 pCtx->ldtr.Sel = uNewLdt;
2875 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2876 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2877 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2878
2879 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2880 {
2881 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2882 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2883 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2884 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2885 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2886 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2887 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2888 }
2889
2890 /*
2891 * Switch CR3 for the new task.
2892 */
2893 if ( fIsNewTSS386
2894 && (pCtx->cr0 & X86_CR0_PG))
2895 {
2896 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2897 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2898 {
2899 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2900 AssertRCSuccessReturn(rc, rc);
2901 }
2902 else
2903 pCtx->cr3 = uNewCr3;
2904
2905 /* Inform PGM. */
2906 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2907 {
2908 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2909 AssertRCReturn(rc, rc);
2910 /* ignore informational status codes */
2911 }
2912 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2913 }
2914
2915 /*
2916 * Switch LDTR for the new task.
2917 */
2918 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2919 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2920 else
2921 {
2922 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2923
2924 IEMSELDESC DescNewLdt;
2925 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2926 if (rcStrict != VINF_SUCCESS)
2927 {
2928 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2929 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2930 return rcStrict;
2931 }
2932 if ( !DescNewLdt.Legacy.Gen.u1Present
2933 || DescNewLdt.Legacy.Gen.u1DescType
2934 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2935 {
2936 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2937 uNewLdt, DescNewLdt.Legacy.u));
2938 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2939 }
2940
2941 pCtx->ldtr.ValidSel = uNewLdt;
2942 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2943 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2944 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2945 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2946 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2947 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2948 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2949 }
2950
2951 IEMSELDESC DescSS;
2952 if (IEM_IS_V86_MODE(pIemCpu))
2953 {
2954 pIemCpu->uCpl = 3;
2955 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2956 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2957 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2958 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2959 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2960 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2961 }
2962 else
2963 {
2964 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2965
2966 /*
2967 * Load the stack segment for the new task.
2968 */
2969 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2970 {
2971 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2972 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2973 }
2974
2975 /* Fetch the descriptor. */
2976 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2977 if (rcStrict != VINF_SUCCESS)
2978 {
2979 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2980 VBOXSTRICTRC_VAL(rcStrict)));
2981 return rcStrict;
2982 }
2983
2984 /* SS must be a data segment and writable. */
2985 if ( !DescSS.Legacy.Gen.u1DescType
2986 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2987 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2988 {
2989 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2990 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2991 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2992 }
2993
2994 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2995 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2996 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2997 {
2998 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2999 uNewCpl));
3000 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3001 }
3002
3003 /* Is it there? */
3004 if (!DescSS.Legacy.Gen.u1Present)
3005 {
3006 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3007 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3008 }
3009
3010 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3011 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3012
3013 /* Set the accessed bit before committing the result into SS. */
3014 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3015 {
3016 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3017 if (rcStrict != VINF_SUCCESS)
3018 return rcStrict;
3019 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3020 }
3021
3022 /* Commit SS. */
3023 pCtx->ss.Sel = uNewSS;
3024 pCtx->ss.ValidSel = uNewSS;
3025 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3026 pCtx->ss.u32Limit = cbLimit;
3027 pCtx->ss.u64Base = u64Base;
3028 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3029 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3030
3031 /* CPL has changed, update IEM before loading rest of segments. */
3032 pIemCpu->uCpl = uNewCpl;
3033
3034 /*
3035 * Load the data segments for the new task.
3036 */
3037 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3038 if (rcStrict != VINF_SUCCESS)
3039 return rcStrict;
3040 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3041 if (rcStrict != VINF_SUCCESS)
3042 return rcStrict;
3043 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3044 if (rcStrict != VINF_SUCCESS)
3045 return rcStrict;
3046 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3047 if (rcStrict != VINF_SUCCESS)
3048 return rcStrict;
3049
3050 /*
3051 * Load the code segment for the new task.
3052 */
3053 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3054 {
3055 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3056 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3057 }
3058
3059 /* Fetch the descriptor. */
3060 IEMSELDESC DescCS;
3061 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3062 if (rcStrict != VINF_SUCCESS)
3063 {
3064 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3065 return rcStrict;
3066 }
3067
3068 /* CS must be a code segment. */
3069 if ( !DescCS.Legacy.Gen.u1DescType
3070 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3071 {
3072 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3073 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3074 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3075 }
3076
3077 /* For conforming CS, DPL must be less than or equal to the RPL. */
3078 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3079 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3080 {
3081 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3082 DescCS.Legacy.Gen.u2Dpl));
3083 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3084 }
3085
3086 /* For non-conforming CS, DPL must match RPL. */
3087 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3088 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3089 {
3090 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3091 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3092 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3093 }
3094
3095 /* Is it there? */
3096 if (!DescCS.Legacy.Gen.u1Present)
3097 {
3098 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3099 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3100 }
3101
3102 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3103 u64Base = X86DESC_BASE(&DescCS.Legacy);
3104
3105 /* Set the accessed bit before committing the result into CS. */
3106 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3107 {
3108 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3109 if (rcStrict != VINF_SUCCESS)
3110 return rcStrict;
3111 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3112 }
3113
3114 /* Commit CS. */
3115 pCtx->cs.Sel = uNewCS;
3116 pCtx->cs.ValidSel = uNewCS;
3117 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3118 pCtx->cs.u32Limit = cbLimit;
3119 pCtx->cs.u64Base = u64Base;
3120 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3122 }
3123
3124 /** @todo Debug trap. */
3125 if (fIsNewTSS386 && fNewDebugTrap)
3126 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3127
3128 /*
3129 * Construct the error code masks based on what caused this task switch.
3130 * See Intel Instruction reference for INT.
3131 */
3132 uint16_t uExt;
3133 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3134 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3135 {
3136 uExt = 1;
3137 }
3138 else
3139 uExt = 0;
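    /* uExt becomes the EXT flag (bit 0) of any error code pushed while handling
       this event: it is set when the source is external to the interrupted
       program (hardware interrupt or CPU exception) and clear for a software
       INT n. It is used for the #SS/#GP error codes raised further down. */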
3140
3141 /*
3142 * Push any error code on to the new stack.
3143 */
3144 if (fFlags & IEM_XCPT_FLAGS_ERR)
3145 {
3146 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3147 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3148 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3149
3150 /* Check that there is sufficient space on the stack. */
3151 /** @todo Factor out segment limit checking for normal/expand down segments
3152 * into a separate function. */
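    /* Note: for a normal (expand-up) stack segment the valid ESP range is
       [cbStackFrame, limit]; for an expand-down segment it is (limit, 0xffff]
       or (limit, 0xffffffff] depending on the D/B bit. */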
3153 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3154 {
3155 if ( pCtx->esp - 1 > cbLimitSS
3156 || pCtx->esp < cbStackFrame)
3157 {
3158 /** @todo Intel says #SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3159 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3160 cbStackFrame));
3161 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3162 }
3163 }
3164 else
3165 {
3166 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3167 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3168 {
3169 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3170 cbStackFrame));
3171 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3172 }
3173 }
3174
3175
3176 if (fIsNewTSS386)
3177 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3178 else
3179 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3180 if (rcStrict != VINF_SUCCESS)
3181 {
3182 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3183 VBOXSTRICTRC_VAL(rcStrict)));
3184 return rcStrict;
3185 }
3186 }
3187
3188 /* Check the new EIP against the new CS limit. */
3189 if (pCtx->eip > pCtx->cs.u32Limit)
3190 {
3191 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3192 pCtx->eip, pCtx->cs.u32Limit));
3193 /** @todo Intel says #GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3194 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3195 }
3196
3197 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3198 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3199}
3200
3201
3202/**
3203 * Implements exceptions and interrupts for protected mode.
3204 *
3205 * @returns VBox strict status code.
3206 * @param pIemCpu The IEM per CPU instance data.
3207 * @param pCtx The CPU context.
3208 * @param cbInstr The number of bytes to offset rIP by in the return
3209 * address.
3210 * @param u8Vector The interrupt / exception vector number.
3211 * @param fFlags The flags.
3212 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3213 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3214 */
3215IEM_STATIC VBOXSTRICTRC
3216iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3217 PCPUMCTX pCtx,
3218 uint8_t cbInstr,
3219 uint8_t u8Vector,
3220 uint32_t fFlags,
3221 uint16_t uErr,
3222 uint64_t uCr2)
3223{
3224 /*
3225 * Read the IDT entry.
3226 */
3227 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3228 {
3229 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3230 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3231 }
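    /* Protected-mode IDT entries are 8 bytes, so the gate for vector N starts at
       IDTR.base + 8*N; the limit check above ensures its last byte (8*N + 7) is
       within the IDT. */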
3232 X86DESC Idte;
3233 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3234 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3235 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3236 return rcStrict;
3237 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3238 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3239 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3240
3241 /*
3242 * Check the descriptor type, DPL and such.
3243 * ASSUMES this is done in the same order as described for call-gate calls.
3244 */
3245 if (Idte.Gate.u1DescType)
3246 {
3247 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3248 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3249 }
3250 bool fTaskGate = false;
3251 uint8_t f32BitGate = true;
3252 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3253 switch (Idte.Gate.u4Type)
3254 {
3255 case X86_SEL_TYPE_SYS_UNDEFINED:
3256 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3257 case X86_SEL_TYPE_SYS_LDT:
3258 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3259 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3260 case X86_SEL_TYPE_SYS_UNDEFINED2:
3261 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3262 case X86_SEL_TYPE_SYS_UNDEFINED3:
3263 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3264 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3265 case X86_SEL_TYPE_SYS_UNDEFINED4:
3266 {
3267 /** @todo check what actually happens when the type is wrong...
3268 * esp. call gates. */
3269 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3270 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3271 }
3272
3273 case X86_SEL_TYPE_SYS_286_INT_GATE:
3274 f32BitGate = false; /* fall thru */
3275 case X86_SEL_TYPE_SYS_386_INT_GATE:
3276 fEflToClear |= X86_EFL_IF;
3277 break;
3278
3279 case X86_SEL_TYPE_SYS_TASK_GATE:
3280 fTaskGate = true;
3281#ifndef IEM_IMPLEMENTS_TASKSWITCH
3282 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3283#endif
3284 break;
3285
3286 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3287 f32BitGate = false; /* fall thru */
3288 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3289 break;
3290
3291 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3292 }
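    /* Summary: interrupt gates additionally clear EFLAGS.IF on delivery, trap
       gates leave IF untouched, task gates dispatch via a separate TSS, and the
       286 gate variants only carry a 16-bit offset. Anything else is invalid in
       the IDT and yields #GP above. */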
3293
3294 /* Check DPL against CPL if applicable. */
3295 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3296 {
3297 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3298 {
3299 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3300 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3301 }
3302 }
3303
3304 /* Is it there? */
3305 if (!Idte.Gate.u1Present)
3306 {
3307 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3308 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3309 }
3310
3311 /* Is it a task-gate? */
3312 if (fTaskGate)
3313 {
3314 /*
3315 * Construct the error code masks based on what caused this task switch.
3316 * See Intel Instruction reference for INT.
3317 */
3318 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3319 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3320 RTSEL SelTSS = Idte.Gate.u16Sel;
3321
3322 /*
3323 * Fetch the TSS descriptor in the GDT.
3324 */
3325 IEMSELDESC DescTSS;
3326 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3327 if (rcStrict != VINF_SUCCESS)
3328 {
3329 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3330 VBOXSTRICTRC_VAL(rcStrict)));
3331 return rcStrict;
3332 }
3333
3334 /* The TSS descriptor must be a system segment and be available (not busy). */
3335 if ( DescTSS.Legacy.Gen.u1DescType
3336 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3337 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3338 {
3339 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3340 u8Vector, SelTSS, DescTSS.Legacy.au64));
3341 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3342 }
3343
3344 /* The TSS must be present. */
3345 if (!DescTSS.Legacy.Gen.u1Present)
3346 {
3347 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3348 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3349 }
3350
3351 /* Do the actual task switch. */
3352 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3353 }
3354
3355 /* A null CS is bad. */
3356 RTSEL NewCS = Idte.Gate.u16Sel;
3357 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3358 {
3359 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3360 return iemRaiseGeneralProtectionFault0(pIemCpu);
3361 }
3362
3363 /* Fetch the descriptor for the new CS. */
3364 IEMSELDESC DescCS;
3365 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3366 if (rcStrict != VINF_SUCCESS)
3367 {
3368 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3369 return rcStrict;
3370 }
3371
3372 /* Must be a code segment. */
3373 if (!DescCS.Legacy.Gen.u1DescType)
3374 {
3375 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3376 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3377 }
3378 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3379 {
3380 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3381 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3382 }
3383
3384 /* Don't allow lowering the privilege level. */
3385 /** @todo Does the lowering of privileges apply to software interrupts
3386 * only? This has bearings on the more-privileged or
3387 * same-privilege stack behavior further down. A testcase would
3388 * be nice. */
3389 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3390 {
3391 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3392 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3393 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3394 }
3395
3396 /* Make sure the selector is present. */
3397 if (!DescCS.Legacy.Gen.u1Present)
3398 {
3399 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3400 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3401 }
3402
3403 /* Check the new EIP against the new CS limit. */
3404 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3405 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3406 ? Idte.Gate.u16OffsetLow
3407 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3408 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3409 if (uNewEip > cbLimitCS)
3410 {
3411 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3412 u8Vector, uNewEip, cbLimitCS, NewCS));
3413 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3414 }
3415
3416 /* Calc the flag image to push. */
3417 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3418 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3419 fEfl &= ~X86_EFL_RF;
3420 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3421 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3422
3423 /* From V8086 mode only go to CPL 0. */
3424 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3425 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3426 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3427 {
3428 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3429 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3430 }
3431
3432 /*
3433 * If the privilege level changes, we need to get a new stack from the TSS.
3434 * This in turns means validating the new SS and ESP...
3435 */
3436 if (uNewCpl != pIemCpu->uCpl)
3437 {
3438 RTSEL NewSS;
3439 uint32_t uNewEsp;
3440 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3441 if (rcStrict != VINF_SUCCESS)
3442 return rcStrict;
3443
3444 IEMSELDESC DescSS;
3445 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3446 if (rcStrict != VINF_SUCCESS)
3447 return rcStrict;
3448
3449 /* Check that there is sufficient space for the stack frame. */
3450 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3451 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3452 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3453 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3454
3455 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3456 {
3457 if ( uNewEsp - 1 > cbLimitSS
3458 || uNewEsp < cbStackFrame)
3459 {
3460 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3461 u8Vector, NewSS, uNewEsp, cbStackFrame));
3462 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3463 }
3464 }
3465 else
3466 {
3467 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3468 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3469 {
3470 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3471 u8Vector, NewSS, uNewEsp, cbStackFrame));
3472 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3473 }
3474 }
3475
3476 /*
3477 * Start making changes.
3478 */
3479
3480 /* Create the stack frame. */
3481 RTPTRUNION uStackFrame;
3482 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3483 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3484 if (rcStrict != VINF_SUCCESS)
3485 return rcStrict;
3486 void * const pvStackFrame = uStackFrame.pv;
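        /* Inner-privilege frame layout (lowest address first): [error code,]
           EIP, CS, EFLAGS, old ESP, old SS, plus ES/DS/FS/GS when interrupting
           V8086 code. The error code is written first and the pointer advanced,
           so the indices below are the same with and without it. */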
3487 if (f32BitGate)
3488 {
3489 if (fFlags & IEM_XCPT_FLAGS_ERR)
3490 *uStackFrame.pu32++ = uErr;
3491 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3492 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3493 uStackFrame.pu32[2] = fEfl;
3494 uStackFrame.pu32[3] = pCtx->esp;
3495 uStackFrame.pu32[4] = pCtx->ss.Sel;
3496 if (fEfl & X86_EFL_VM)
3497 {
3498 uStackFrame.pu32[1] = pCtx->cs.Sel;
3499 uStackFrame.pu32[5] = pCtx->es.Sel;
3500 uStackFrame.pu32[6] = pCtx->ds.Sel;
3501 uStackFrame.pu32[7] = pCtx->fs.Sel;
3502 uStackFrame.pu32[8] = pCtx->gs.Sel;
3503 }
3504 }
3505 else
3506 {
3507 if (fFlags & IEM_XCPT_FLAGS_ERR)
3508 *uStackFrame.pu16++ = uErr;
3509 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3510 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3511 uStackFrame.pu16[2] = fEfl;
3512 uStackFrame.pu16[3] = pCtx->sp;
3513 uStackFrame.pu16[4] = pCtx->ss.Sel;
3514 if (fEfl & X86_EFL_VM)
3515 {
3516 uStackFrame.pu16[1] = pCtx->cs.Sel;
3517 uStackFrame.pu16[5] = pCtx->es.Sel;
3518 uStackFrame.pu16[6] = pCtx->ds.Sel;
3519 uStackFrame.pu16[7] = pCtx->fs.Sel;
3520 uStackFrame.pu16[8] = pCtx->gs.Sel;
3521 }
3522 }
3523 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3524 if (rcStrict != VINF_SUCCESS)
3525 return rcStrict;
3526
3527 /* Mark the selectors 'accessed' (hope this is the correct time). */
3528 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3529 * after pushing the stack frame? (Write protect the gdt + stack to
3530 * find out.) */
3531 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3532 {
3533 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3534 if (rcStrict != VINF_SUCCESS)
3535 return rcStrict;
3536 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3537 }
3538
3539 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3540 {
3541 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3542 if (rcStrict != VINF_SUCCESS)
3543 return rcStrict;
3544 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3545 }
3546
3547 /*
3548 * Start committing the register changes (joins with the DPL=CPL branch).
3549 */
3550 pCtx->ss.Sel = NewSS;
3551 pCtx->ss.ValidSel = NewSS;
3552 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3553 pCtx->ss.u32Limit = cbLimitSS;
3554 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3555 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3556 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3557 pIemCpu->uCpl = uNewCpl;
3558
3559 if (fEfl & X86_EFL_VM)
3560 {
3561 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3562 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3563 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3564 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3565 }
3566 }
3567 /*
3568 * Same privilege, no stack change and smaller stack frame.
3569 */
3570 else
3571 {
3572 uint64_t uNewRsp;
3573 RTPTRUNION uStackFrame;
3574 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3575 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3576 if (rcStrict != VINF_SUCCESS)
3577 return rcStrict;
3578 void * const pvStackFrame = uStackFrame.pv;
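        /* Same-privilege frame: only [error code,] EIP/IP, CS and EFLAGS are
           pushed; SS:ESP stays put. */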
3579
3580 if (f32BitGate)
3581 {
3582 if (fFlags & IEM_XCPT_FLAGS_ERR)
3583 *uStackFrame.pu32++ = uErr;
3584 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3585 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3586 uStackFrame.pu32[2] = fEfl;
3587 }
3588 else
3589 {
3590 if (fFlags & IEM_XCPT_FLAGS_ERR)
3591 *uStackFrame.pu16++ = uErr;
3592 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3593 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3594 uStackFrame.pu16[2] = fEfl;
3595 }
3596 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3597 if (rcStrict != VINF_SUCCESS)
3598 return rcStrict;
3599
3600 /* Mark the CS selector as 'accessed'. */
3601 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3602 {
3603 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3604 if (rcStrict != VINF_SUCCESS)
3605 return rcStrict;
3606 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3607 }
3608
3609 /*
3610 * Start committing the register changes (joins with the other branch).
3611 */
3612 pCtx->rsp = uNewRsp;
3613 }
3614
3615 /* ... register committing continues. */
3616 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3617 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3618 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3619 pCtx->cs.u32Limit = cbLimitCS;
3620 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3621 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3622
3623 pCtx->rip = uNewEip;
3624 fEfl &= ~fEflToClear;
3625 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3626
3627 if (fFlags & IEM_XCPT_FLAGS_CR2)
3628 pCtx->cr2 = uCr2;
3629
3630 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3631 iemRaiseXcptAdjustState(pCtx, u8Vector);
3632
3633 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3634}
3635
3636
3637/**
3638 * Implements exceptions and interrupts for long mode.
3639 *
3640 * @returns VBox strict status code.
3641 * @param pIemCpu The IEM per CPU instance data.
3642 * @param pCtx The CPU context.
3643 * @param cbInstr The number of bytes to offset rIP by in the return
3644 * address.
3645 * @param u8Vector The interrupt / exception vector number.
3646 * @param fFlags The flags.
3647 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3648 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3649 */
3650IEM_STATIC VBOXSTRICTRC
3651iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3652 PCPUMCTX pCtx,
3653 uint8_t cbInstr,
3654 uint8_t u8Vector,
3655 uint32_t fFlags,
3656 uint16_t uErr,
3657 uint64_t uCr2)
3658{
3659 /*
3660 * Read the IDT entry.
3661 */
3662 uint16_t offIdt = (uint16_t)u8Vector << 4;
3663 if (pCtx->idtr.cbIdt < offIdt + 7)
3664 {
3665 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3666 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3667 }
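    /* Long-mode IDT entries are 16 bytes (hence the '<< 4' above), so the gate
       descriptor is fetched in two 8-byte reads below. */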
3668 X86DESC64 Idte;
3669 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3670 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3671 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3672 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3673 return rcStrict;
3674 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3675 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3676 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3677
3678 /*
3679 * Check the descriptor type, DPL and such.
3680 * ASSUMES this is done in the same order as described for call-gate calls.
3681 */
3682 if (Idte.Gate.u1DescType)
3683 {
3684 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3685 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3686 }
3687 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3688 switch (Idte.Gate.u4Type)
3689 {
3690 case AMD64_SEL_TYPE_SYS_INT_GATE:
3691 fEflToClear |= X86_EFL_IF;
3692 break;
3693 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3694 break;
3695
3696 default:
3697 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3698 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3699 }
3700
3701 /* Check DPL against CPL if applicable. */
3702 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3703 {
3704 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3705 {
3706 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3707 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3708 }
3709 }
3710
3711 /* Is it there? */
3712 if (!Idte.Gate.u1Present)
3713 {
3714 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3715 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3716 }
3717
3718 /* A null CS is bad. */
3719 RTSEL NewCS = Idte.Gate.u16Sel;
3720 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3721 {
3722 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3723 return iemRaiseGeneralProtectionFault0(pIemCpu);
3724 }
3725
3726 /* Fetch the descriptor for the new CS. */
3727 IEMSELDESC DescCS;
3728 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3729 if (rcStrict != VINF_SUCCESS)
3730 {
3731 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3732 return rcStrict;
3733 }
3734
3735 /* Must be a 64-bit code segment. */
3736 if (!DescCS.Long.Gen.u1DescType)
3737 {
3738 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3739 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3740 }
3741 if ( !DescCS.Long.Gen.u1Long
3742 || DescCS.Long.Gen.u1DefBig
3743 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3744 {
3745 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3746 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3747 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3748 }
3749
3750 /* Don't allow lowering the privilege level. For non-conforming CS
3751 selectors, the CS.DPL sets the privilege level the trap/interrupt
3752 handler runs at. For conforming CS selectors, the CPL remains
3753 unchanged, but the CS.DPL must be <= CPL. */
3754 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3755 * when CPU in Ring-0. Result \#GP? */
3756 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3757 {
3758 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3759 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3760 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3761 }
3762
3763
3764 /* Make sure the selector is present. */
3765 if (!DescCS.Legacy.Gen.u1Present)
3766 {
3767 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3768 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3769 }
3770
3771 /* Check that the new RIP is canonical. */
3772 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3773 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3774 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3775 if (!IEM_IS_CANONICAL(uNewRip))
3776 {
3777 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3778 return iemRaiseGeneralProtectionFault0(pIemCpu);
3779 }
3780
3781 /*
3782 * If the privilege level changes or if the IST isn't zero, we need to get
3783 * a new stack from the TSS.
3784 */
3785 uint64_t uNewRsp;
3786 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3787 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3788 if ( uNewCpl != pIemCpu->uCpl
3789 || Idte.Gate.u3IST != 0)
3790 {
3791 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3792 if (rcStrict != VINF_SUCCESS)
3793 return rcStrict;
3794 }
3795 else
3796 uNewRsp = pCtx->rsp;
3797 uNewRsp &= ~(uint64_t)0xf;
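    /* The masking above mirrors the CPU's behaviour of aligning RSP down to a
       16-byte boundary before pushing the interrupt frame in 64-bit mode. */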
3798
3799 /*
3800 * Calc the flag image to push.
3801 */
3802 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3803 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3804 fEfl &= ~X86_EFL_RF;
3805 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3806 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3807
3808 /*
3809 * Start making changes.
3810 */
3811
3812 /* Create the stack frame. */
3813 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3814 RTPTRUNION uStackFrame;
3815 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3816 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3817 if (rcStrict != VINF_SUCCESS)
3818 return rcStrict;
3819 void * const pvStackFrame = uStackFrame.pv;
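    /* The 64-bit frame always contains SS:RSP, even when the privilege level
       does not change: [error code,] RIP, CS, RFLAGS, RSP, SS (5 or 6 qwords). */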
3820
3821 if (fFlags & IEM_XCPT_FLAGS_ERR)
3822 *uStackFrame.pu64++ = uErr;
3823 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3824 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3825 uStackFrame.pu64[2] = fEfl;
3826 uStackFrame.pu64[3] = pCtx->rsp;
3827 uStackFrame.pu64[4] = pCtx->ss.Sel;
3828 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3829 if (rcStrict != VINF_SUCCESS)
3830 return rcStrict;
3831
3832 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3833 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3834 * after pushing the stack frame? (Write protect the gdt + stack to
3835 * find out.) */
3836 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3837 {
3838 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3839 if (rcStrict != VINF_SUCCESS)
3840 return rcStrict;
3841 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3842 }
3843
3844 /*
3845 * Start committing the register changes.
3846 */
3847 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3848 * hidden registers when interrupting 32-bit or 16-bit code! */
3849 if (uNewCpl != pIemCpu->uCpl)
3850 {
3851 pCtx->ss.Sel = 0 | uNewCpl;
3852 pCtx->ss.ValidSel = 0 | uNewCpl;
3853 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3854 pCtx->ss.u32Limit = UINT32_MAX;
3855 pCtx->ss.u64Base = 0;
3856 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3857 }
3858 pCtx->rsp = uNewRsp - cbStackFrame;
3859 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3860 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3861 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3862 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3863 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3864 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3865 pCtx->rip = uNewRip;
3866 pIemCpu->uCpl = uNewCpl;
3867
3868 fEfl &= ~fEflToClear;
3869 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3870
3871 if (fFlags & IEM_XCPT_FLAGS_CR2)
3872 pCtx->cr2 = uCr2;
3873
3874 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3875 iemRaiseXcptAdjustState(pCtx, u8Vector);
3876
3877 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3878}
3879
3880
3881/**
3882 * Implements exceptions and interrupts.
3883 *
3884 * All exceptions and interrupts go through this function!
3885 *
3886 * @returns VBox strict status code.
3887 * @param pIemCpu The IEM per CPU instance data.
3888 * @param cbInstr The number of bytes to offset rIP by in the return
3889 * address.
3890 * @param u8Vector The interrupt / exception vector number.
3891 * @param fFlags The flags.
3892 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3893 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3894 */
3895DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3896iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3897 uint8_t cbInstr,
3898 uint8_t u8Vector,
3899 uint32_t fFlags,
3900 uint16_t uErr,
3901 uint64_t uCr2)
3902{
3903 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3904#ifdef IN_RING0
3905 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3906 AssertRCReturn(rc, rc);
3907#endif
3908
3909 /*
3910 * Perform the V8086 IOPL check: a software INT with IOPL < 3 is converted straight into a #GP(0) here instead of being dispatched (avoids nesting the fault).
3911 */
3912 if ( pCtx->eflags.Bits.u1VM
3913 && pCtx->eflags.Bits.u2IOPL != 3
3914 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3915 && (pCtx->cr0 & X86_CR0_PE) )
3916 {
3917 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3918 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3919 u8Vector = X86_XCPT_GP;
3920 uErr = 0;
3921 }
3922#ifdef DBGFTRACE_ENABLED
3923 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3924 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3925 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3926#endif
3927
3928 /*
3929 * Do recursion accounting.
3930 */
3931 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3932 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3933 if (pIemCpu->cXcptRecursions == 0)
3934 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3935 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3936 else
3937 {
3938 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3939 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3940
3941 /** @todo double and triple faults. */
3942 if (pIemCpu->cXcptRecursions >= 3)
3943 {
3944#ifdef DEBUG_bird
3945 AssertFailed();
3946#endif
3947 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3948 }
3949
3950 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3951 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3952 {
3953 ....
3954 } */
3955 }
3956 pIemCpu->cXcptRecursions++;
3957 pIemCpu->uCurXcpt = u8Vector;
3958 pIemCpu->fCurXcpt = fFlags;
3959
3960 /*
3961 * Extensive logging.
3962 */
3963#if defined(LOG_ENABLED) && defined(IN_RING3)
3964 if (LogIs3Enabled())
3965 {
3966 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3967 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3968 char szRegs[4096];
3969 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3970 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3971 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3972 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3973 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3974 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3975 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3976 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3977 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3978 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3979 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3980 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3981 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3982 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3983 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3984 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3985 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3986 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3987 " efer=%016VR{efer}\n"
3988 " pat=%016VR{pat}\n"
3989 " sf_mask=%016VR{sf_mask}\n"
3990 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3991 " lstar=%016VR{lstar}\n"
3992 " star=%016VR{star} cstar=%016VR{cstar}\n"
3993 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3994 );
3995
3996 char szInstr[256];
3997 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3998 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3999 szInstr, sizeof(szInstr), NULL);
4000 Log3(("%s%s\n", szRegs, szInstr));
4001 }
4002#endif /* LOG_ENABLED */
4003
4004 /*
4005 * Call the mode specific worker function.
4006 */
4007 VBOXSTRICTRC rcStrict;
4008 if (!(pCtx->cr0 & X86_CR0_PE))
4009 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4010 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4011 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4012 else
4013 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4014
4015 /*
4016 * Unwind.
4017 */
4018 pIemCpu->cXcptRecursions--;
4019 pIemCpu->uCurXcpt = uPrevXcpt;
4020 pIemCpu->fCurXcpt = fPrevXcpt;
4021 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4022 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4023 return rcStrict;
4024}
4025
4026
4027/** \#DE - 00. */
4028DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4029{
4030 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4031}
4032
4033
4034/** \#DB - 01.
4035 * @note This automatically clears DR7.GD. */
4036DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4037{
4038 /** @todo set/clear RF. */
4039 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4040 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4041}
4042
4043
4044/** \#UD - 06. */
4045DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4046{
4047 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4048}
4049
4050
4051/** \#NM - 07. */
4052DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4053{
4054 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4055}
4056
4057
4058/** \#TS(err) - 0a. */
4059DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4060{
4061 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4062}
4063
4064
4065/** \#TS(tr) - 0a. */
4066DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4067{
4068 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4069 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4070}
4071
4072
4073/** \#TS(0) - 0a. */
4074DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4075{
4076 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4077 0, 0);
4078}
4079
4080
4081/** \#TS(err) - 0a. */
4082DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4083{
4084 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4085 uSel & X86_SEL_MASK_OFF_RPL, 0);
4086}
4087
4088
4089/** \#NP(err) - 0b. */
4090DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4091{
4092 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4093}
4094
4095
4096/** \#NP(seg) - 0b. */
4097DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4098{
4099 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4100 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4101}
4102
4103
4104/** \#NP(sel) - 0b. */
4105DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4106{
4107 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4108 uSel & ~X86_SEL_RPL, 0);
4109}
4110
4111
4112/** \#SS(seg) - 0c. */
4113DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4114{
4115 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4116 uSel & ~X86_SEL_RPL, 0);
4117}
4118
4119
4120/** \#SS(err) - 0c. */
4121DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4122{
4123 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4124}
4125
4126
4127/** \#GP(n) - 0d. */
4128DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4129{
4130 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4131}
4132
4133
4134/** \#GP(0) - 0d. */
4135DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4136{
4137 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4138}
4139
4140
4141/** \#GP(sel) - 0d. */
4142DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4143{
4144 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4145 Sel & ~X86_SEL_RPL, 0);
4146}
4147
4148
4149/** \#GP(0) - 0d. */
4150DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4151{
4152 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4153}
4154
4155
4156/** \#GP(sel) - 0d. */
4157DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4158{
4159 NOREF(iSegReg); NOREF(fAccess);
4160 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4161 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4162}
4163
4164
4165/** \#GP(sel) - 0d. */
4166DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4167{
4168 NOREF(Sel);
4169 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4170}
4171
4172
4173/** \#GP(sel) - 0d. */
4174DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4175{
4176 NOREF(iSegReg); NOREF(fAccess);
4177 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4178}
4179
4180
4181/** \#PF(n) - 0e. */
4182DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4183{
4184 uint16_t uErr;
4185 switch (rc)
4186 {
4187 case VERR_PAGE_NOT_PRESENT:
4188 case VERR_PAGE_TABLE_NOT_PRESENT:
4189 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4190 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4191 uErr = 0;
4192 break;
4193
4194 default:
4195 AssertMsgFailed(("%Rrc\n", rc)); /* fall thru */
4196 case VERR_ACCESS_DENIED:
4197 uErr = X86_TRAP_PF_P;
4198 break;
4199
4200 /** @todo reserved */
4201 }
4202
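    /* The remaining #PF error code bits set below: P (bit 0) was handled in the
       switch above, U/S (bit 2) is set for user-mode (CPL=3) accesses, I/D
       (bit 4) for instruction fetches when NX paging is active, and W/R (bit 1)
       for write accesses. */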
4203 if (pIemCpu->uCpl == 3)
4204 uErr |= X86_TRAP_PF_US;
4205
4206 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4207 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4208 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4209 uErr |= X86_TRAP_PF_ID;
4210
4211#if 0 /* This is so much non-sense, really. Why was it done like that? */
4212 /* Note! RW access callers reporting a WRITE protection fault, will clear
4213 the READ flag before calling. So, read-modify-write accesses (RW)
4214 can safely be reported as READ faults. */
4215 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4216 uErr |= X86_TRAP_PF_RW;
4217#else
4218 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4219 {
4220 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4221 uErr |= X86_TRAP_PF_RW;
4222 }
4223#endif
4224
4225 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4226 uErr, GCPtrWhere);
4227}
4228
4229
4230/** \#MF(0) - 10. */
4231DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4232{
4233 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4234}
4235
4236
4237/** \#AC(0) - 11. */
4238DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4239{
4240 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4241}
4242
4243
4244/**
4245 * Macro for calling iemCImplRaiseDivideError().
4246 *
4247 * This enables us to add/remove arguments and force different levels of
4248 * inlining as we wish.
4249 *
4250 * @return Strict VBox status code.
4251 */
4252#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4253IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4254{
4255 NOREF(cbInstr);
4256 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4257}
4258
4259
4260/**
4261 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4262 *
4263 * This enables us to add/remove arguments and force different levels of
4264 * inlining as we wish.
4265 *
4266 * @return Strict VBox status code.
4267 */
4268#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4269IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4270{
4271 NOREF(cbInstr);
4272 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4273}
4274
4275
4276/**
4277 * Macro for calling iemCImplRaiseInvalidOpcode().
4278 *
4279 * This enables us to add/remove arguments and force different levels of
4280 * inlining as we wish.
4281 *
4282 * @return Strict VBox status code.
4283 */
4284#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4285IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4286{
4287 NOREF(cbInstr);
4288 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4289}
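/*
 * Usage sketch (illustrative only, hypothetical decoder name): an opcode
 * decoder that wants to raise \#UD simply defers to the C implementation
 * through the macro above, e.g.:
 * @code
 *     FNIEMOP_DEF(iemOp_SomeInvalidEncoding)
 *     {
 *         return IEMOP_RAISE_INVALID_OPCODE();
 *     }
 * @endcode
 * The FNIEMOP_UD_STUB macros further down expand to exactly this pattern.
 */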
4290
4291
4292/** @} */
4293
4294
4295/*
4296 *
4297 * Helper routines.
4298 * Helper routines.
4299 * Helper routines.
4300 *
4301 */
4302
4303/**
4304 * Recalculates the effective operand size.
4305 *
4306 * @param pIemCpu The IEM state.
4307 */
4308IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4309{
4310 switch (pIemCpu->enmCpuMode)
4311 {
4312 case IEMMODE_16BIT:
4313 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4314 break;
4315 case IEMMODE_32BIT:
4316 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4317 break;
4318 case IEMMODE_64BIT:
4319 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4320 {
4321 case 0:
4322 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4323 break;
4324 case IEM_OP_PRF_SIZE_OP:
4325 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4326 break;
4327 case IEM_OP_PRF_SIZE_REX_W:
4328 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4329 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4330 break;
4331 }
4332 break;
4333 default:
4334 AssertFailed();
4335 }
4336}
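/*
 * Summary of the calculation above:
 *   - 16-bit mode: 16-bit by default, the 0x66 prefix selects 32-bit.
 *   - 32-bit mode: 32-bit by default, the 0x66 prefix selects 16-bit.
 *   - 64-bit mode: the default operand size applies unless overridden; 0x66
 *     selects 16-bit and REX.W selects 64-bit, with REX.W taking precedence.
 */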
4337
4338
4339/**
4340 * Sets the default operand size to 64-bit and recalculates the effective
4341 * operand size.
4342 *
4343 * @param pIemCpu The IEM state.
4344 */
4345IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4346{
4347 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4348 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4349 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4350 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4351 else
4352 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4353}
4354
4355
4356/*
4357 *
4358 * Common opcode decoders.
4359 * Common opcode decoders.
4360 * Common opcode decoders.
4361 *
4362 */
4363//#include <iprt/mem.h>
4364
4365/**
4366 * Used to add extra details about a stub case.
4367 * @param pIemCpu The IEM per CPU state.
4368 */
4369IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4370{
4371#if defined(LOG_ENABLED) && defined(IN_RING3)
4372 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4373 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4374 char szRegs[4096];
4375 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4376 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4377 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4378 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4379 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4380 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4381 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4382 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4383 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4384 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4385 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4386 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4387 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4388 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4389 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4390 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4391 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4392 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4393 " efer=%016VR{efer}\n"
4394 " pat=%016VR{pat}\n"
4395 " sf_mask=%016VR{sf_mask}\n"
4396 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4397 " lstar=%016VR{lstar}\n"
4398 " star=%016VR{star} cstar=%016VR{cstar}\n"
4399 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4400 );
4401
4402 char szInstr[256];
4403 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4404 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4405 szInstr, sizeof(szInstr), NULL);
4406
4407 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4408#else
4409 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4410#endif
4411}
4412
4413/**
4414 * Complains about a stub.
4415 *
4416 * Providing two versions of this macro, one for daily use and one for use when
4417 * working on IEM.
4418 */
4419#if 0
4420# define IEMOP_BITCH_ABOUT_STUB() \
4421 do { \
4422 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4423 iemOpStubMsg2(pIemCpu); \
4424 RTAssertPanic(); \
4425 } while (0)
4426#else
4427# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4428#endif
4429
4430/** Stubs an opcode. */
4431#define FNIEMOP_STUB(a_Name) \
4432 FNIEMOP_DEF(a_Name) \
4433 { \
4434 IEMOP_BITCH_ABOUT_STUB(); \
4435 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4436 } \
4437 typedef int ignore_semicolon
4438
4439/** Stubs an opcode. */
4440#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4441 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4442 { \
4443 IEMOP_BITCH_ABOUT_STUB(); \
4444 NOREF(a_Name0); \
4445 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4446 } \
4447 typedef int ignore_semicolon
4448
4449/** Stubs an opcode which currently should raise \#UD. */
4450#define FNIEMOP_UD_STUB(a_Name) \
4451 FNIEMOP_DEF(a_Name) \
4452 { \
4453 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4454 return IEMOP_RAISE_INVALID_OPCODE(); \
4455 } \
4456 typedef int ignore_semicolon
4457
4458/** Stubs an opcode which currently should raise \#UD. */
4459#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4460 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4461 { \
4462 NOREF(a_Name0); \
4463 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4464 return IEMOP_RAISE_INVALID_OPCODE(); \
4465 } \
4466 typedef int ignore_semicolon
4467
4468
4469
4470/** @name Register Access.
4471 * @{
4472 */
4473
4474/**
4475 * Gets a reference (pointer) to the specified hidden segment register.
4476 *
4477 * @returns Hidden register reference.
4478 * @param pIemCpu The per CPU data.
4479 * @param iSegReg The segment register.
4480 */
4481IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4482{
4483 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4484 PCPUMSELREG pSReg;
4485 switch (iSegReg)
4486 {
4487 case X86_SREG_ES: pSReg = &pCtx->es; break;
4488 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4489 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4490 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4491 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4492 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4493 default:
4494 AssertFailedReturn(NULL);
4495 }
4496#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4497 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4498 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4499#else
4500 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4501#endif
4502 return pSReg;
4503}
4504
4505
4506/**
4507 * Gets a reference (pointer) to the specified segment register (the selector
4508 * value).
4509 *
4510 * @returns Pointer to the selector variable.
4511 * @param pIemCpu The per CPU data.
4512 * @param iSegReg The segment register.
4513 */
4514IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4515{
4516 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4517 switch (iSegReg)
4518 {
4519 case X86_SREG_ES: return &pCtx->es.Sel;
4520 case X86_SREG_CS: return &pCtx->cs.Sel;
4521 case X86_SREG_SS: return &pCtx->ss.Sel;
4522 case X86_SREG_DS: return &pCtx->ds.Sel;
4523 case X86_SREG_FS: return &pCtx->fs.Sel;
4524 case X86_SREG_GS: return &pCtx->gs.Sel;
4525 }
4526 AssertFailedReturn(NULL);
4527}
4528
4529
4530/**
4531 * Fetches the selector value of a segment register.
4532 *
4533 * @returns The selector value.
4534 * @param pIemCpu The per CPU data.
4535 * @param iSegReg The segment register.
4536 */
4537IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4538{
4539 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4540 switch (iSegReg)
4541 {
4542 case X86_SREG_ES: return pCtx->es.Sel;
4543 case X86_SREG_CS: return pCtx->cs.Sel;
4544 case X86_SREG_SS: return pCtx->ss.Sel;
4545 case X86_SREG_DS: return pCtx->ds.Sel;
4546 case X86_SREG_FS: return pCtx->fs.Sel;
4547 case X86_SREG_GS: return pCtx->gs.Sel;
4548 }
4549 AssertFailedReturn(0xffff);
4550}
4551
4552
4553/**
4554 * Gets a reference (pointer) to the specified general register.
4555 *
4556 * @returns Register reference.
4557 * @param pIemCpu The per CPU data.
4558 * @param iReg The general register.
4559 */
4560IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4561{
4562 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4563 switch (iReg)
4564 {
4565 case X86_GREG_xAX: return &pCtx->rax;
4566 case X86_GREG_xCX: return &pCtx->rcx;
4567 case X86_GREG_xDX: return &pCtx->rdx;
4568 case X86_GREG_xBX: return &pCtx->rbx;
4569 case X86_GREG_xSP: return &pCtx->rsp;
4570 case X86_GREG_xBP: return &pCtx->rbp;
4571 case X86_GREG_xSI: return &pCtx->rsi;
4572 case X86_GREG_xDI: return &pCtx->rdi;
4573 case X86_GREG_x8: return &pCtx->r8;
4574 case X86_GREG_x9: return &pCtx->r9;
4575 case X86_GREG_x10: return &pCtx->r10;
4576 case X86_GREG_x11: return &pCtx->r11;
4577 case X86_GREG_x12: return &pCtx->r12;
4578 case X86_GREG_x13: return &pCtx->r13;
4579 case X86_GREG_x14: return &pCtx->r14;
4580 case X86_GREG_x15: return &pCtx->r15;
4581 }
4582 AssertFailedReturn(NULL);
4583}
4584
4585
4586/**
4587 * Gets a reference (pointer) to the specified 8-bit general register.
4588 *
4589 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4590 *
4591 * @returns Register reference.
4592 * @param pIemCpu The per CPU data.
4593 * @param iReg The register.
4594 */
4595IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4596{
4597 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4598 return (uint8_t *)iemGRegRef(pIemCpu, iReg); /* With a REX prefix, 4..7 encode SPL, BPL, SIL and DIL. */
4599
4600 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3); /* Without REX, 4..7 encode AH, CH, DH and BH, */
4601 if (iReg >= 4) /* i.e. the high byte of xAX..xBX. */
4602 pu8Reg++;
4603 return pu8Reg;
4604}
4605
4606
4607/**
4608 * Fetches the value of an 8-bit general register.
4609 *
4610 * @returns The register value.
4611 * @param pIemCpu The per CPU data.
4612 * @param iReg The register.
4613 */
4614IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4615{
4616 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4617 return *pbSrc;
4618}
4619
4620
4621/**
4622 * Fetches the value of a 16-bit general register.
4623 *
4624 * @returns The register value.
4625 * @param pIemCpu The per CPU data.
4626 * @param iReg The register.
4627 */
4628IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4629{
4630 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4631}
4632
4633
4634/**
4635 * Fetches the value of a 32-bit general register.
4636 *
4637 * @returns The register value.
4638 * @param pIemCpu The per CPU data.
4639 * @param iReg The register.
4640 */
4641IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4642{
4643 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4644}
4645
4646
4647/**
4648 * Fetches the value of a 64-bit general register.
4649 *
4650 * @returns The register value.
4651 * @param pIemCpu The per CPU data.
4652 * @param iReg The register.
4653 */
4654IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4655{
4656 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4657}
4658
4659
4660/**
4661 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4662 *
4663 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4664 * segment limit.
4665 *
4666 * @param pIemCpu The per CPU data.
4667 * @param offNextInstr The offset of the next instruction.
4668 */
4669IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4670{
4671 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4672 switch (pIemCpu->enmEffOpSize)
4673 {
4674 case IEMMODE_16BIT:
4675 {
4676 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4677 if ( uNewIp > pCtx->cs.u32Limit
4678 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4679 return iemRaiseGeneralProtectionFault0(pIemCpu);
4680 pCtx->rip = uNewIp;
4681 break;
4682 }
4683
4684 case IEMMODE_32BIT:
4685 {
4686 Assert(pCtx->rip <= UINT32_MAX);
4687 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4688
4689 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4690 if (uNewEip > pCtx->cs.u32Limit)
4691 return iemRaiseGeneralProtectionFault0(pIemCpu);
4692 pCtx->rip = uNewEip;
4693 break;
4694 }
4695
4696 case IEMMODE_64BIT:
4697 {
4698 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4699
4700 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4701 if (!IEM_IS_CANONICAL(uNewRip))
4702 return iemRaiseGeneralProtectionFault0(pIemCpu);
4703 pCtx->rip = uNewRip;
4704 break;
4705 }
4706
4707 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4708 }
4709
4710 pCtx->eflags.Bits.u1RF = 0;
4711 return VINF_SUCCESS;
4712}
4713
4714
4715/**
4716 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4717 *
4718 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4719 * segment limit.
4720 *
4721 * @returns Strict VBox status code.
4722 * @param pIemCpu The per CPU data.
4723 * @param offNextInstr The offset of the next instruction.
4724 */
4725IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4726{
4727 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4728 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4729
4730 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4731 if ( uNewIp > pCtx->cs.u32Limit
4732 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4733 return iemRaiseGeneralProtectionFault0(pIemCpu);
4734 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4735 pCtx->rip = uNewIp;
4736 pCtx->eflags.Bits.u1RF = 0;
4737
4738 return VINF_SUCCESS;
4739}
4740
4741
4742/**
4743 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4744 *
4745 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4746 * segment limit.
4747 *
4748 * @returns Strict VBox status code.
4749 * @param pIemCpu The per CPU data.
4750 * @param offNextInstr The offset of the next instruction.
4751 */
4752IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4753{
4754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4755 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4756
4757 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4758 {
4759 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4760
4761 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4762 if (uNewEip > pCtx->cs.u32Limit)
4763 return iemRaiseGeneralProtectionFault0(pIemCpu);
4764 pCtx->rip = uNewEip;
4765 }
4766 else
4767 {
4768 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4769
4770 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4771 if (!IEM_IS_CANONICAL(uNewRip))
4772 return iemRaiseGeneralProtectionFault0(pIemCpu);
4773 pCtx->rip = uNewRip;
4774 }
4775 pCtx->eflags.Bits.u1RF = 0;
4776 return VINF_SUCCESS;
4777}
4778
4779
4780/**
4781 * Performs a near jump to the specified address.
4782 *
4783 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4784 * segment limit.
4785 *
4786 * @param pIemCpu The per CPU data.
4787 * @param uNewRip The new RIP value.
4788 */
4789IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4790{
4791 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4792 switch (pIemCpu->enmEffOpSize)
4793 {
4794 case IEMMODE_16BIT:
4795 {
4796 Assert(uNewRip <= UINT16_MAX);
4797 if ( uNewRip > pCtx->cs.u32Limit
4798 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4799 return iemRaiseGeneralProtectionFault0(pIemCpu);
4800 /** @todo Test 16-bit jump in 64-bit mode. */
4801 pCtx->rip = uNewRip;
4802 break;
4803 }
4804
4805 case IEMMODE_32BIT:
4806 {
4807 Assert(uNewRip <= UINT32_MAX);
4808 Assert(pCtx->rip <= UINT32_MAX);
4809 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4810
4811 if (uNewRip > pCtx->cs.u32Limit)
4812 return iemRaiseGeneralProtectionFault0(pIemCpu);
4813 pCtx->rip = uNewRip;
4814 break;
4815 }
4816
4817 case IEMMODE_64BIT:
4818 {
4819 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4820
4821 if (!IEM_IS_CANONICAL(uNewRip))
4822 return iemRaiseGeneralProtectionFault0(pIemCpu);
4823 pCtx->rip = uNewRip;
4824 break;
4825 }
4826
4827 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4828 }
4829
4830 pCtx->eflags.Bits.u1RF = 0;
4831 return VINF_SUCCESS;
4832}
4833
4834
4835/**
4836 * Gets the address of the top of the stack.
4837 *
4838 * @param pIemCpu The per CPU data.
4839 * @param pCtx The CPU context whose SP/ESP/RSP should be
4840 * read.
4841 */
4842DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4843{
4844 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4845 return pCtx->rsp;
4846 if (pCtx->ss.Attr.n.u1DefBig)
4847 return pCtx->esp;
4848 return pCtx->sp;
4849}
4850
4851
4852/**
4853 * Updates the RIP/EIP/IP to point to the next instruction.
4854 *
4855 * This function leaves the EFLAGS.RF flag alone.
4856 *
4857 * @param pIemCpu The per CPU data.
4858 * @param cbInstr The number of bytes to add.
4859 */
4860IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4861{
4862 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4863 switch (pIemCpu->enmCpuMode)
4864 {
4865 case IEMMODE_16BIT:
4866 Assert(pCtx->rip <= UINT16_MAX);
4867 pCtx->eip += cbInstr;
4868 pCtx->eip &= UINT32_C(0xffff);
4869 break;
4870
4871 case IEMMODE_32BIT:
4872 pCtx->eip += cbInstr;
4873 Assert(pCtx->rip <= UINT32_MAX);
4874 break;
4875
4876 case IEMMODE_64BIT:
4877 pCtx->rip += cbInstr;
4878 break;
4879 default: AssertFailed();
4880 }
4881}
4882
4883
4884#if 0
4885/**
4886 * Updates the RIP/EIP/IP to point to the next instruction.
4887 *
4888 * @param pIemCpu The per CPU data.
4889 */
4890IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4891{
4892 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4893}
4894#endif
4895
4896
4897
4898/**
4899 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4900 *
4901 * @param pIemCpu The per CPU data.
4902 * @param cbInstr The number of bytes to add.
4903 */
4904IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4905{
4906 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4907
4908 pCtx->eflags.Bits.u1RF = 0;
4909
4910 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4911 switch (pIemCpu->enmCpuMode)
4912 {
4913 /** @todo investigate if EIP or RIP is really incremented. */
4914 case IEMMODE_16BIT:
4915 case IEMMODE_32BIT:
4916 pCtx->eip += cbInstr;
4917 Assert(pCtx->rip <= UINT32_MAX);
4918 break;
4919
4920 case IEMMODE_64BIT:
4921 pCtx->rip += cbInstr;
4922 break;
4923 default: AssertFailed();
4924 }
4925}
4926
4927
4928/**
4929 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4930 *
4931 * @param pIemCpu The per CPU data.
4932 */
4933IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4934{
4935 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4936}
4937
4938
4939/**
4940 * Adds to the stack pointer.
4941 *
4942 * @param pIemCpu The per CPU data.
4943 * @param pCtx The CPU context whose SP/ESP/RSP should be
4944 * updated.
4945 * @param cbToAdd The number of bytes to add.
4946 */
4947DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4948{
4949 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4950 pCtx->rsp += cbToAdd;
4951 else if (pCtx->ss.Attr.n.u1DefBig)
4952 pCtx->esp += cbToAdd;
4953 else
4954 pCtx->sp += cbToAdd;
4955}
4956
4957
4958/**
4959 * Subtracts from the stack pointer.
4960 *
4961 * @param pIemCpu The per CPU data.
4962 * @param pCtx The CPU context whose SP/ESP/RSP should be
4963 * updated.
4964 * @param cbToSub The number of bytes to subtract.
4965 */
4966DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4967{
4968 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4969 pCtx->rsp -= cbToSub;
4970 else if (pCtx->ss.Attr.n.u1DefBig)
4971 pCtx->esp -= cbToSub;
4972 else
4973 pCtx->sp -= cbToSub;
4974}
4975
4976
4977/**
4978 * Adds to the temporary stack pointer.
4979 *
4980 * @param pIemCpu The per CPU data.
4981 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4982 * @param cbToAdd The number of bytes to add.
4983 * @param pCtx Where to get the current stack mode.
4984 */
4985DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4986{
4987 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4988 pTmpRsp->u += cbToAdd;
4989 else if (pCtx->ss.Attr.n.u1DefBig)
4990 pTmpRsp->DWords.dw0 += cbToAdd;
4991 else
4992 pTmpRsp->Words.w0 += cbToAdd;
4993}
4994
4995
4996/**
4997 * Subtracts from the temporary stack pointer.
4998 *
4999 * @param pIemCpu The per CPU data.
5000 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5001 * @param cbToSub The number of bytes to subtract.
5002 * @param pCtx Where to get the current stack mode.
5003 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5004 * expecting that.
5005 */
5006DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5007{
5008 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5009 pTmpRsp->u -= cbToSub;
5010 else if (pCtx->ss.Attr.n.u1DefBig)
5011 pTmpRsp->DWords.dw0 -= cbToSub;
5012 else
5013 pTmpRsp->Words.w0 -= cbToSub;
5014}
5015
5016
5017/**
5018 * Calculates the effective stack address for a push of the specified size as
5019 * well as the new RSP value (upper bits may be masked).
5020 *
5021 * @returns Effective stack address for the push.
5022 * @param pIemCpu The IEM per CPU data.
5023 * @param pCtx Where to get the current stack mode.
5024 * @param cbItem The size of the stack item to push.
5025 * @param puNewRsp Where to return the new RSP value.
5026 */
5027DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5028{
5029 RTUINT64U uTmpRsp;
5030 RTGCPTR GCPtrTop;
5031 uTmpRsp.u = pCtx->rsp;
5032
5033 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5034 GCPtrTop = uTmpRsp.u -= cbItem;
5035 else if (pCtx->ss.Attr.n.u1DefBig)
5036 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5037 else
5038 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5039 *puNewRsp = uTmpRsp.u;
5040 return GCPtrTop;
5041}
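

#if 0
/* Illustrative sketch, not part of the build: with a 16-bit stack (SS.B=0
   and not in 64-bit mode) only the SP word is decremented, so a push with
   SP=0 wraps to 0xfffe while the upper bits of RSP are carried over into
   the returned new RSP value unchanged. The RSP value is made up. */
static void iemRegGetRspForPushExample(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
{
    /* Assumes pIemCpu->enmCpuMode != IEMMODE_64BIT, pCtx->ss.Attr.n.u1DefBig == 0
       and pCtx->rsp == UINT64_C(0x00010000) (i.e. the SP word is zero). */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2 /*cbItem*/, &uNewRsp);
    Assert(GCPtrTop == 0xfffe);                     /* SP wrapped from 0x0000 to 0xfffe */
    Assert(uNewRsp == UINT64_C(0x0001fffe));        /* upper RSP bits untouched */
}
#endif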
5042
5043
5044/**
5045 * Gets the current stack pointer and calculates the value after a pop of the
5046 * specified size.
5047 *
5048 * @returns Current stack pointer.
5049 * @param pIemCpu The per CPU data.
5050 * @param pCtx Where to get the current stack mode.
5051 * @param cbItem The size of the stack item to pop.
5052 * @param puNewRsp Where to return the new RSP value.
5053 */
5054DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5055{
5056 RTUINT64U uTmpRsp;
5057 RTGCPTR GCPtrTop;
5058 uTmpRsp.u = pCtx->rsp;
5059
5060 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5061 {
5062 GCPtrTop = uTmpRsp.u;
5063 uTmpRsp.u += cbItem;
5064 }
5065 else if (pCtx->ss.Attr.n.u1DefBig)
5066 {
5067 GCPtrTop = uTmpRsp.DWords.dw0;
5068 uTmpRsp.DWords.dw0 += cbItem;
5069 }
5070 else
5071 {
5072 GCPtrTop = uTmpRsp.Words.w0;
5073 uTmpRsp.Words.w0 += cbItem;
5074 }
5075 *puNewRsp = uTmpRsp.u;
5076 return GCPtrTop;
5077}
5078
5079
5080/**
5081 * Calculates the effective stack address for a push of the specified size as
5082 * well as the new temporary RSP value (upper bits may be masked).
5083 *
5084 * @returns Effective stack address for the push.
5085 * @param pIemCpu The per CPU data.
5086 * @param pCtx Where to get the current stack mode.
5087 * @param pTmpRsp The temporary stack pointer. This is updated.
5088 * @param cbItem The size of the stack item to push.
5089 */
5090DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5091{
5092 RTGCPTR GCPtrTop;
5093
5094 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5095 GCPtrTop = pTmpRsp->u -= cbItem;
5096 else if (pCtx->ss.Attr.n.u1DefBig)
5097 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5098 else
5099 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5100 return GCPtrTop;
5101}
5102
5103
5104/**
5105 * Gets the effective stack address for a pop of the specified size and
5106 * calculates and updates the temporary RSP.
5107 *
5108 * @returns Current stack pointer.
5109 * @param pIemCpu The per CPU data.
5110 * @param pTmpRsp The temporary stack pointer. This is updated.
5111 * @param pCtx Where to get the current stack mode.
5112 * @param cbItem The size of the stack item to pop.
5113 */
5114DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5115{
5116 RTGCPTR GCPtrTop;
5117 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5118 {
5119 GCPtrTop = pTmpRsp->u;
5120 pTmpRsp->u += cbItem;
5121 }
5122 else if (pCtx->ss.Attr.n.u1DefBig)
5123 {
5124 GCPtrTop = pTmpRsp->DWords.dw0;
5125 pTmpRsp->DWords.dw0 += cbItem;
5126 }
5127 else
5128 {
5129 GCPtrTop = pTmpRsp->Words.w0;
5130 pTmpRsp->Words.w0 += cbItem;
5131 }
5132 return GCPtrTop;
5133}
5134
5135/** @} */
5136
5137
5138/** @name FPU access and helpers.
5139 *
5140 * @{
5141 */
5142
5143
5144/**
5145 * Hook for preparing to use the host FPU.
5146 *
5147 * This is necessary in ring-0 and raw-mode context.
5148 *
5149 * @param pIemCpu The IEM per CPU data.
5150 */
5151DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5152{
5153#ifdef IN_RING3
5154 NOREF(pIemCpu);
5155#else
5156/** @todo RZ: FIXME */
5157//# error "Implement me"
5158#endif
5159}
5160
5161
5162/**
5163 * Hook for preparing to use the host FPU for SSE
5164 *
5165 * This is necessary in ring-0 and raw-mode context.
5166 *
5167 * @param pIemCpu The IEM per CPU data.
5168 */
5169DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5170{
5171 iemFpuPrepareUsage(pIemCpu);
5172}
5173
5174
5175/**
5176 * Stores a QNaN value into a FPU register.
5177 *
5178 * @param pReg Pointer to the register.
5179 */
5180DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5181{
5182 pReg->au32[0] = UINT32_C(0x00000000);
5183 pReg->au32[1] = UINT32_C(0xc0000000);
5184 pReg->au16[4] = UINT16_C(0xffff);
5185}
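

#if 0
/* Illustrative sketch, not part of the build: the value stored above is the
   x87 "real indefinite" QNaN - sign bit set, exponent all ones (0x7fff) and
   mantissa 0xc000000000000000 (integer bit plus the top fraction bit). This
   assumes the RTFLOAT80U::s layout used elsewhere in this file. */
static void iemFpuStoreQNanExample(void)
{
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    Assert(r80.s.fSign == 1);
    Assert(r80.s.uExponent == 0x7fff);
    Assert(r80.s.u64Mantissa == UINT64_C(0xc000000000000000));
}
#endif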
5186
5187
5188/**
5189 * Updates the FOP, FPU.CS and FPUIP registers.
5190 *
5191 * @param pIemCpu The IEM per CPU data.
5192 * @param pCtx The CPU context.
5193 * @param pFpuCtx The FPU context.
5194 */
5195DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5196{
5197 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5198 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5199 /** @todo x87.CS and FPUIP need to be kept separately. */
5200 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5201 {
5202 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
5203 * handled in real mode, based on the fnsave and fnstenv images. */
5204 pFpuCtx->CS = 0;
5205 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5206 }
5207 else
5208 {
5209 pFpuCtx->CS = pCtx->cs.Sel;
5210 pFpuCtx->FPUIP = pCtx->rip;
5211 }
5212}
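

#if 0
/* Illustrative sketch, not part of the build: in real and V8086 mode the
   worker above leaves the CS field zero and folds the selector into FPUIP
   as (CS << 4) | EIP, mimicking the fnsave/fnstenv image contents. The CS
   and EIP values below are made up, for the arithmetic only. */
static void iemFpuUpdateOpcodeAndIpRealModeExample(void)
{
    uint32_t const uCs    = 0x1234;
    uint32_t const uEip   = 0x0056;
    uint32_t const uFpuIp = uEip | (uCs << 4);      /* 0x12340 | 0x56 */
    Assert(uFpuIp == UINT32_C(0x12396));
}
#endif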
5213
5214
5215/**
5216 * Updates the x87.DS and FPUDP registers.
5217 *
5218 * @param pIemCpu The IEM per CPU data.
5219 * @param pCtx The CPU context.
5220 * @param pFpuCtx The FPU context.
5221 * @param iEffSeg The effective segment register.
5222 * @param GCPtrEff The effective address relative to @a iEffSeg.
5223 */
5224DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5225{
5226 RTSEL sel;
5227 switch (iEffSeg)
5228 {
5229 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5230 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5231 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5232 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5233 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5234 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5235 default:
5236 AssertMsgFailed(("%d\n", iEffSeg));
5237 sel = pCtx->ds.Sel;
5238 }
5239 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5240 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5241 {
5242 pFpuCtx->DS = 0;
5243 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5244 }
5245 else
5246 {
5247 pFpuCtx->DS = sel;
5248 pFpuCtx->FPUDP = GCPtrEff;
5249 }
5250}
5251
5252
5253/**
5254 * Rotates the stack registers in the push direction.
5255 *
5256 * @param pFpuCtx The FPU context.
5257 * @remarks This is a complete waste of time, but fxsave stores the registers in
5258 * stack order.
5259 */
5260DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5261{
5262 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5263 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5264 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5265 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5266 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5267 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5268 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5269 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5270 pFpuCtx->aRegs[0].r80 = r80Tmp;
5271}
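

#if 0
/* Illustrative sketch, not part of the build: the rotation keeps the
   invariant that aRegs[i] mirrors ST(i). A push first stores the new value
   in aRegs[7] (the slot that becomes ST(0) once TOP is decremented) and
   then rotates, so the new value ends up in aRegs[0] and every old ST(i)
   becomes ST(i+1). Demonstrated with plain bytes standing in for registers: */
static void iemFpuRotateStackPushExample(void)
{
    uint8_t abSt[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };  /* abSt[i] ~ ST(i) */
    abSt[7] = 99;                                           /* value being pushed */

    uint8_t bTmp = abSt[7];                                 /* same rotation as above */
    for (unsigned i = 7; i > 0; i--)
        abSt[i] = abSt[i - 1];
    abSt[0] = bTmp;

    Assert(abSt[0] == 99 && abSt[1] == 10 && abSt[7] == 16);
}
#endif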
5272
5273
5274/**
5275 * Rotates the stack registers in the pop direction.
5276 *
5277 * @param pFpuCtx The FPU context.
5278 * @remarks This is a complete waste of time, but fxsave stores the registers in
5279 * stack order.
5280 */
5281DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5282{
5283 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5284 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5285 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5286 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5287 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5288 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5289 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5290 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5291 pFpuCtx->aRegs[7].r80 = r80Tmp;
5292}
5293
5294
5295/**
5296 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5297 * exception prevents it.
5298 *
5299 * @param pIemCpu The IEM per CPU data.
5300 * @param pResult The FPU operation result to push.
5301 * @param pFpuCtx The FPU context.
5302 */
5303IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5304{
5305 /* Update FSW and bail if there are pending exceptions afterwards. */
5306 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5307 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5308 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5309 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5310 {
5311 pFpuCtx->FSW = fFsw;
5312 return;
5313 }
5314
5315 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5316 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5317 {
5318 /* All is fine, push the actual value. */
5319 pFpuCtx->FTW |= RT_BIT(iNewTop);
5320 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5321 }
5322 else if (pFpuCtx->FCW & X86_FCW_IM)
5323 {
5324 /* Masked stack overflow, push QNaN. */
5325 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5326 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5327 }
5328 else
5329 {
5330 /* Raise stack overflow, don't push anything. */
5331 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5332 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5333 return;
5334 }
5335
5336 fFsw &= ~X86_FSW_TOP_MASK;
5337 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5338 pFpuCtx->FSW = fFsw;
5339
5340 iemFpuRotateStackPush(pFpuCtx);
5341}
5342
5343
5344/**
5345 * Stores a result in a FPU register and updates the FSW and FTW.
5346 *
5347 * @param pFpuCtx The FPU context.
5348 * @param pResult The result to store.
5349 * @param iStReg Which FPU register to store it in.
5350 */
5351IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5352{
5353 Assert(iStReg < 8);
5354 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5355 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5356 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5357 pFpuCtx->FTW |= RT_BIT(iReg);
5358 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5359}
5360
5361
5362/**
5363 * Only updates the FPU status word (FSW) with the result of the current
5364 * instruction.
5365 *
5366 * @param pFpuCtx The FPU context.
5367 * @param u16FSW The FSW output of the current instruction.
5368 */
5369IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5370{
5371 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5372 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5373}
5374
5375
5376/**
5377 * Pops one item off the FPU stack if no pending exception prevents it.
5378 *
5379 * @param pFpuCtx The FPU context.
5380 */
5381IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5382{
5383 /* Check pending exceptions. */
5384 uint16_t uFSW = pFpuCtx->FSW;
5385 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5386 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5387 return;
5388
5389 /* TOP++ (adding 9 is the same as adding 1 modulo 8 in the 3-bit TOP field). */
5390 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5391 uFSW &= ~X86_FSW_TOP_MASK;
5392 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5393 pFpuCtx->FSW = uFSW;
5394
5395 /* Mark the previous ST0 as empty. */
5396 iOldTop >>= X86_FSW_TOP_SHIFT;
5397 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5398
5399 /* Rotate the registers. */
5400 iemFpuRotateStackPop(pFpuCtx);
5401}
5402
5403
5404/**
5405 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5406 *
5407 * @param pIemCpu The IEM per CPU data.
5408 * @param pResult The FPU operation result to push.
5409 */
5410IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5411{
5412 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5413 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5414 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5415 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5416}
5417
5418
5419/**
5420 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5421 * and sets FPUDP and FPUDS.
5422 *
5423 * @param pIemCpu The IEM per CPU data.
5424 * @param pResult The FPU operation result to push.
5425 * @param iEffSeg The effective segment register.
5426 * @param GCPtrEff The effective address relative to @a iEffSeg.
5427 */
5428IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5429{
5430 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5431 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5432 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5433 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5434 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5435}
5436
5437
5438/**
5439 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5440 * unless a pending exception prevents it.
5441 *
5442 * @param pIemCpu The IEM per CPU data.
5443 * @param pResult The FPU operation result to store and push.
5444 */
5445IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5446{
5447 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5448 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5449 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5450
5451 /* Update FSW and bail if there are pending exceptions afterwards. */
5452 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5453 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5454 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5455 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5456 {
5457 pFpuCtx->FSW = fFsw;
5458 return;
5459 }
5460
5461 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5462 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5463 {
5464 /* All is fine, push the actual value. */
5465 pFpuCtx->FTW |= RT_BIT(iNewTop);
5466 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5467 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5468 }
5469 else if (pFpuCtx->FCW & X86_FCW_IM)
5470 {
5471 /* Masked stack overflow, push QNaN. */
5472 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5473 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5474 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5475 }
5476 else
5477 {
5478 /* Raise stack overflow, don't push anything. */
5479 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5480 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5481 return;
5482 }
5483
5484 fFsw &= ~X86_FSW_TOP_MASK;
5485 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5486 pFpuCtx->FSW = fFsw;
5487
5488 iemFpuRotateStackPush(pFpuCtx);
5489}
5490
5491
5492/**
5493 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5494 * FOP.
5495 *
5496 * @param pIemCpu The IEM per CPU data.
5497 * @param pResult The result to store.
5498 * @param iStReg Which FPU register to store it in.
5500 */
5501IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5502{
5503 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5504 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5505 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5506 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5507}
5508
5509
5510/**
5511 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5512 * FOP, and then pops the stack.
5513 *
5514 * @param pIemCpu The IEM per CPU data.
5515 * @param pResult The result to store.
5516 * @param iStReg Which FPU register to store it in.
5518 */
5519IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5520{
5521 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5522 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5523 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5524 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5525 iemFpuMaybePopOne(pFpuCtx);
5526}
5527
5528
5529/**
5530 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5531 * FPUDP, and FPUDS.
5532 *
5533 * @param pIemCpu The IEM per CPU data.
5534 * @param pResult The result to store.
5535 * @param iStReg Which FPU register to store it in.
5537 * @param iEffSeg The effective memory operand selector register.
5538 * @param GCPtrEff The effective memory operand offset.
5539 */
5540IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5541{
5542 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5543 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5544 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5545 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5546 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5547}
5548
5549
5550/**
5551 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5552 * FPUDP, and FPUDS, and then pops the stack.
5553 *
5554 * @param pIemCpu The IEM per CPU data.
5555 * @param pResult The result to store.
5556 * @param iStReg Which FPU register to store it in.
5558 * @param iEffSeg The effective memory operand selector register.
5559 * @param GCPtrEff The effective memory operand offset.
5560 */
5561IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5562 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5563{
5564 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5565 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5566 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5567 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5568 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5569 iemFpuMaybePopOne(pFpuCtx);
5570}
5571
5572
5573/**
5574 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5575 *
5576 * @param pIemCpu The IEM per CPU data.
5577 */
5578IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5579{
5580 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5581 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5582 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5583}
5584
5585
5586/**
5587 * Marks the specified stack register as free (for FFREE).
5588 *
5589 * @param pIemCpu The IEM per CPU data.
5590 * @param iStReg The register to free.
5591 */
5592IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5593{
5594 Assert(iStReg < 8);
5595 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5596 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5597 pFpuCtx->FTW &= ~RT_BIT(iReg);
5598}
5599
5600
5601/**
5602 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5603 *
5604 * @param pIemCpu The IEM per CPU data.
5605 */
5606IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5607{
5608 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5609 uint16_t uFsw = pFpuCtx->FSW;
5610 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5611 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5612 uFsw &= ~X86_FSW_TOP_MASK;
5613 uFsw |= uTop;
5614 pFpuCtx->FSW = uFsw;
5615}
5616
5617
5618/**
5619 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5620 *
5621 * @param pIemCpu The IEM per CPU data.
5622 */
5623IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5624{
5625 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5626 uint16_t uFsw = pFpuCtx->FSW;
5627 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5628 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5629 uFsw &= ~X86_FSW_TOP_MASK;
5630 uFsw |= uTop;
5631 pFpuCtx->FSW = uFsw;
5632}
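

#if 0
/* Illustrative sketch, not part of the build: TOP is the 3-bit field in FSW
   bits 11..13, so adding 7 in that field is the same as subtracting 1 modulo
   8 - which is why the two helpers above add 1 and 7 respectively. With
   TOP=0 a decrement wraps to 7, and incrementing TOP=7 wraps back to 0: */
static void iemFpuTopWrapExample(void)
{
    uint16_t uTop = 0 << X86_FSW_TOP_SHIFT;
    uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;    /* TOP-- */
    Assert(X86_FSW_TOP_GET(uTop) == 7);

    uTop = 7 << X86_FSW_TOP_SHIFT;
    uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;    /* TOP++ */
    Assert(X86_FSW_TOP_GET(uTop) == 0);
}
#endif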
5633
5634
5635/**
5636 * Updates the FSW, FOP, FPUIP, and FPUCS.
5637 *
5638 * @param pIemCpu The IEM per CPU data.
5639 * @param u16FSW The FSW from the current instruction.
5640 */
5641IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5642{
5643 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5644 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5645 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5646 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5647}
5648
5649
5650/**
5651 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5652 *
5653 * @param pIemCpu The IEM per CPU data.
5654 * @param u16FSW The FSW from the current instruction.
5655 */
5656IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5657{
5658 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5659 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5660 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5661 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5662 iemFpuMaybePopOne(pFpuCtx);
5663}
5664
5665
5666/**
5667 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5668 *
5669 * @param pIemCpu The IEM per CPU data.
5670 * @param u16FSW The FSW from the current instruction.
5671 * @param iEffSeg The effective memory operand selector register.
5672 * @param GCPtrEff The effective memory operand offset.
5673 */
5674IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5675{
5676 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5677 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5678 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5679 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5680 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5681}
5682
5683
5684/**
5685 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5686 *
5687 * @param pIemCpu The IEM per CPU data.
5688 * @param u16FSW The FSW from the current instruction.
5689 */
5690IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5691{
5692 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5693 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5694 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5695 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5696 iemFpuMaybePopOne(pFpuCtx);
5697 iemFpuMaybePopOne(pFpuCtx);
5698}
5699
5700
5701/**
5702 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5703 *
5704 * @param pIemCpu The IEM per CPU data.
5705 * @param u16FSW The FSW from the current instruction.
5706 * @param iEffSeg The effective memory operand selector register.
5707 * @param GCPtrEff The effective memory operand offset.
5708 */
5709IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5710{
5711 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5712 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5713 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5714 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5715 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5716 iemFpuMaybePopOne(pFpuCtx);
5717}
5718
5719
5720/**
5721 * Worker routine for raising an FPU stack underflow exception.
5722 *
5723 * @param pIemCpu The IEM per CPU data.
5724 * @param pFpuCtx The FPU context.
5725 * @param iStReg The stack register being accessed.
5726 */
5727IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5728{
5729 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5730 if (pFpuCtx->FCW & X86_FCW_IM)
5731 {
5732 /* Masked underflow. */
5733 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5734 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5735 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5736 if (iStReg != UINT8_MAX)
5737 {
5738 pFpuCtx->FTW |= RT_BIT(iReg);
5739 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5740 }
5741 }
5742 else
5743 {
5744 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5745 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5746 }
5747}
5748
5749
5750/**
5751 * Raises a FPU stack underflow exception.
5752 *
5753 * @param pIemCpu The IEM per CPU data.
5754 * @param iStReg The destination register that should be loaded
5755 * with QNaN if \#IS is not masked. Specify
5756 * UINT8_MAX if none (like for fcom).
5757 */
5758DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5759{
5760 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5761 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5762 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5763 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5764}
5765
5766
5767DECL_NO_INLINE(IEM_STATIC, void)
5768iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5769{
5770 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5771 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5772 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5773 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5774 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5775}
5776
5777
5778DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5779{
5780 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5781 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5782 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5783 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5784 iemFpuMaybePopOne(pFpuCtx);
5785}
5786
5787
5788DECL_NO_INLINE(IEM_STATIC, void)
5789iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5790{
5791 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5792 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5793 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5794 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5795 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5796 iemFpuMaybePopOne(pFpuCtx);
5797}
5798
5799
5800DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5801{
5802 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5803 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5804 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5805 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5806 iemFpuMaybePopOne(pFpuCtx);
5807 iemFpuMaybePopOne(pFpuCtx);
5808}
5809
5810
5811DECL_NO_INLINE(IEM_STATIC, void)
5812iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5813{
5814 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5815 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5816 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5817
5818 if (pFpuCtx->FCW & X86_FCW_IM)
5819 {
5820 /* Masked underflow - Push QNaN. */
5821 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5822 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5823 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5824 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5825 pFpuCtx->FTW |= RT_BIT(iNewTop);
5826 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5827 iemFpuRotateStackPush(pFpuCtx);
5828 }
5829 else
5830 {
5831 /* Exception pending - don't change TOP or the register stack. */
5832 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5833 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5834 }
5835}
5836
5837
5838DECL_NO_INLINE(IEM_STATIC, void)
5839iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5840{
5841 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5842 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5843 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5844
5845 if (pFpuCtx->FCW & X86_FCW_IM)
5846 {
5847 /* Masked underflow - Push QNaN. */
5848 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5849 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5850 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5851 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5852 pFpuCtx->FTW |= RT_BIT(iNewTop);
5853 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5854 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5855 iemFpuRotateStackPush(pFpuCtx);
5856 }
5857 else
5858 {
5859 /* Exception pending - don't change TOP or the register stack. */
5860 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5861 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5862 }
5863}
5864
5865
5866/**
5867 * Worker routine for raising an FPU stack overflow exception on a push.
5868 *
5869 * @param pFpuCtx The FPU context.
5870 */
5871IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5872{
5873 if (pFpuCtx->FCW & X86_FCW_IM)
5874 {
5875 /* Masked overflow. */
5876 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5877 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5878 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5879 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5880 pFpuCtx->FTW |= RT_BIT(iNewTop);
5881 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5882 iemFpuRotateStackPush(pFpuCtx);
5883 }
5884 else
5885 {
5886 /* Exception pending - don't change TOP or the register stack. */
5887 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5888 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5889 }
5890}
5891
5892
5893/**
5894 * Raises a FPU stack overflow exception on a push.
5895 *
5896 * @param pIemCpu The IEM per CPU data.
5897 */
5898DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5899{
5900 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5901 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5902 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5903 iemFpuStackPushOverflowOnly(pFpuCtx);
5904}
5905
5906
5907/**
5908 * Raises a FPU stack overflow exception on a push with a memory operand.
5909 *
5910 * @param pIemCpu The IEM per CPU data.
5911 * @param iEffSeg The effective memory operand selector register.
5912 * @param GCPtrEff The effective memory operand offset.
5913 */
5914DECL_NO_INLINE(IEM_STATIC, void)
5915iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5916{
5917 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5918 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5919 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5920 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5921 iemFpuStackPushOverflowOnly(pFpuCtx);
5922}
5923
5924
5925IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5926{
5927 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5928 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5929 if (pFpuCtx->FTW & RT_BIT(iReg))
5930 return VINF_SUCCESS;
5931 return VERR_NOT_FOUND;
5932}
5933
5934
5935IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5936{
5937 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5938 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5939 if (pFpuCtx->FTW & RT_BIT(iReg))
5940 {
5941 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5942 return VINF_SUCCESS;
5943 }
5944 return VERR_NOT_FOUND;
5945}
5946
5947
5948IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5949 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5950{
5951 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5952 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5953 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5954 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5955 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5956 {
5957 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5958 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
5959 return VINF_SUCCESS;
5960 }
5961 return VERR_NOT_FOUND;
5962}
5963
5964
5965IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5966{
5967 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5968 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5969 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5970 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5971 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5972 {
5973 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5974 return VINF_SUCCESS;
5975 }
5976 return VERR_NOT_FOUND;
5977}
5978
5979
5980/**
5981 * Updates the FPU exception status after FCW is changed.
5982 *
5983 * @param pFpuCtx The FPU context.
5984 */
5985IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
5986{
5987 uint16_t u16Fsw = pFpuCtx->FSW;
5988 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
5989 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5990 else
5991 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5992 pFpuCtx->FSW = u16Fsw;
5993}
5994
5995
5996/**
5997 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
5998 *
5999 * @returns The full FTW.
6000 * @param pFpuCtx The FPU context.
6001 */
6002IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6003{
6004 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6005 uint16_t u16Ftw = 0;
6006 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6007 for (unsigned iSt = 0; iSt < 8; iSt++)
6008 {
6009 unsigned const iReg = (iSt + iTop) & 7;
6010 if (!(u8Ftw & RT_BIT(iReg)))
6011 u16Ftw |= 3 << (iReg * 2); /* empty */
6012 else
6013 {
6014 uint16_t uTag;
6015 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6016 if (pr80Reg->s.uExponent == 0x7fff)
6017 uTag = 2; /* Exponent is all 1's => Special. */
6018 else if (pr80Reg->s.uExponent == 0x0000)
6019 {
6020 if (pr80Reg->s.u64Mantissa == 0x0000)
6021 uTag = 1; /* All bits are zero => Zero. */
6022 else
6023 uTag = 2; /* Must be special. */
6024 }
6025 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6026 uTag = 0; /* Valid. */
6027 else
6028 uTag = 2; /* Must be special. */
6029
6030 u16Ftw |= uTag << (iReg * 2);
6031 }
6032 }
6033
6034 return u16Ftw;
6035}
6036
6037
6038/**
6039 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6040 *
6041 * @returns The compressed FTW.
6042 * @param u16FullFtw The full FTW to convert.
6043 */
6044IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6045{
6046 uint8_t u8Ftw = 0;
6047 for (unsigned i = 0; i < 8; i++)
6048 {
6049 if ((u16FullFtw & 3) != 3 /*empty*/)
6050 u8Ftw |= RT_BIT(i);
6051 u16FullFtw >>= 2;
6052 }
6053
6054 return u8Ftw;
6055}
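

#if 0
/* Illustrative sketch, not part of the build: the full tag word uses two
   bits per register (00=valid, 01=zero, 10=special, 11=empty) whereas the
   compressed form only keeps a single "not empty" bit per register, so
   compression maps every pair other than 11 to a set bit. The tag word
   below is made up: register 0 valid, 1 zero, 2 special, 3..7 empty. */
static void iemFpuCompressFtwExample(void)
{
    uint16_t const u16FullFtw = UINT16_C(0xffe4);   /* 11 11 11 11 11 10 01 00 */
    Assert(iemFpuCompressFtw(u16FullFtw) == 0x07);  /* only bits 0..2 set */
}
#endif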
6056
6057/** @} */
6058
6059
6060/** @name Memory access.
6061 *
6062 * @{
6063 */
6064
6065
6066/**
6067 * Updates the IEMCPU::cbWritten counter if applicable.
6068 *
6069 * @param pIemCpu The IEM per CPU data.
6070 * @param fAccess The access being accounted for.
6071 * @param cbMem The access size.
6072 */
6073DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6074{
6075 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6076 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6077 pIemCpu->cbWritten += (uint32_t)cbMem;
6078}
6079
6080
6081/**
6082 * Checks if the given segment can be written to, raising the appropriate
6083 * exception if not.
6084 *
6085 * @returns VBox strict status code.
6086 *
6087 * @param pIemCpu The IEM per CPU data.
6088 * @param pHid Pointer to the hidden register.
6089 * @param iSegReg The register number.
6090 * @param pu64BaseAddr Where to return the base address to use for the
6091 * segment. (In 64-bit code it may differ from the
6092 * base in the hidden segment.)
6093 */
6094IEM_STATIC VBOXSTRICTRC
6095iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6096{
6097 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6098 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6099 else
6100 {
6101 if (!pHid->Attr.n.u1Present)
6102 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6103
6104 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6105 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6106 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6107 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6108 *pu64BaseAddr = pHid->u64Base;
6109 }
6110 return VINF_SUCCESS;
6111}
6112
6113
6114/**
6115 * Checks if the given segment can be read from, raising the appropriate
6116 * exception if not.
6117 *
6118 * @returns VBox strict status code.
6119 *
6120 * @param pIemCpu The IEM per CPU data.
6121 * @param pHid Pointer to the hidden register.
6122 * @param iSegReg The register number.
6123 * @param pu64BaseAddr Where to return the base address to use for the
6124 * segment. (In 64-bit code it may differ from the
6125 * base in the hidden segment.)
6126 */
6127IEM_STATIC VBOXSTRICTRC
6128iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6129{
6130 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6131 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6132 else
6133 {
6134 if (!pHid->Attr.n.u1Present)
6135 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6136
6137 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6138 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6139 *pu64BaseAddr = pHid->u64Base;
6140 }
6141 return VINF_SUCCESS;
6142}
6143
6144
6145/**
6146 * Applies the segment limit, base and attributes.
6147 *
6148 * This may raise a \#GP or \#SS.
6149 *
6150 * @returns VBox strict status code.
6151 *
6152 * @param pIemCpu The IEM per CPU data.
6153 * @param fAccess The kind of access which is being performed.
6154 * @param iSegReg The index of the segment register to apply.
6155 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6156 * TSS, ++).
6157 * @param pGCPtrMem Pointer to the guest memory address to apply
6158 * segmentation to. Input and output parameter.
6159 */
6160IEM_STATIC VBOXSTRICTRC
6161iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6162{
6163 if (iSegReg == UINT8_MAX)
6164 return VINF_SUCCESS;
6165
6166 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6167 switch (pIemCpu->enmCpuMode)
6168 {
6169 case IEMMODE_16BIT:
6170 case IEMMODE_32BIT:
6171 {
6172 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6173 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6174
6175 Assert(pSel->Attr.n.u1Present);
6176 Assert(pSel->Attr.n.u1DescType);
6177 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6178 {
6179 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6180 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6181 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6182
6183 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6184 {
6185 /** @todo CPL check. */
6186 }
6187
6188 /*
6189 * There are two kinds of data selectors, normal and expand down.
6190 */
6191 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6192 {
6193 if ( GCPtrFirst32 > pSel->u32Limit
6194 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6195 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6196 }
6197 else
6198 {
6199 /*
6200 * The upper boundary is defined by the B bit, not the G bit!
6201 */
6202 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6203 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6204 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6205 }
6206 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6207 }
6208 else
6209 {
6210
6211 /*
6212 * A code selector can usually be used to read through it; writing is
6213 * only permitted in real and V8086 mode.
6214 */
6215 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6216 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6217 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6218 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6219 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6220
6221 if ( GCPtrFirst32 > pSel->u32Limit
6222 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6223 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6224
6225 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6226 {
6227 /** @todo CPL check. */
6228 }
6229
6230 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6231 }
6232 return VINF_SUCCESS;
6233 }
6234
6235 case IEMMODE_64BIT:
6236 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6237 *pGCPtrMem += pSel->u64Base;
6238 return VINF_SUCCESS;
6239
6240 default:
6241 AssertFailedReturn(VERR_IEM_IPE_7);
6242 }
6243}
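

#if 0
/* Illustrative sketch, not part of the build: for an expand-down data
   segment the valid offsets lie strictly above the limit, up to 0xffff
   (B=0) or 0xffffffff (B=1). A condensed form of the bounds check applied
   above, with made-up example values: with limit=0x0fff and B=0, a 2-byte
   access at offset 0x1000 is fine, while the same access at 0x0fff (at or
   below the limit) or at 0xffff (running past the upper bound) is rejected. */
static bool iemExpandDownAccessOkExample(uint32_t off, uint32_t cb, uint32_t uLimit, bool fBig)
{
    uint32_t const offLast = off + cb - 1;
    uint32_t const offMax  = fBig ? UINT32_MAX : UINT32_C(0xffff);
    return off >= uLimit + 1
        && offLast >= off               /* no 32-bit wrap-around */
        && offLast <= offMax;
}
#endif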
6244
6245
6246/**
6247 * Translates a virtual address to a physical address and checks if we
6248 * can access the page as specified.
6249 *
6250 * @param pIemCpu The IEM per CPU data.
6251 * @param GCPtrMem The virtual address.
6252 * @param fAccess The intended access.
6253 * @param pGCPhysMem Where to return the physical address.
6254 */
6255IEM_STATIC VBOXSTRICTRC
6256iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6257{
6258 /** @todo Need a different PGM interface here. We're currently using
6259 * generic / REM interfaces. this won't cut it for R0 & RC. */
6260 RTGCPHYS GCPhys;
6261 uint64_t fFlags;
6262 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6263 if (RT_FAILURE(rc))
6264 {
6265 /** @todo Check unassigned memory in unpaged mode. */
6266 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6267 *pGCPhysMem = NIL_RTGCPHYS;
6268 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6269 }
6270
6271 /* If the page is writable and does not have the no-exec bit set, all
6272 access is allowed. Otherwise we'll have to check more carefully... */
6273 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6274 {
6275 /* Write to read only memory? */
6276 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6277 && !(fFlags & X86_PTE_RW)
6278 && ( pIemCpu->uCpl != 0
6279 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6280 {
6281 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6282 *pGCPhysMem = NIL_RTGCPHYS;
6283 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6284 }
6285
6286 /* Kernel memory accessed by userland? */
6287 if ( !(fFlags & X86_PTE_US)
6288 && pIemCpu->uCpl == 3
6289 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6290 {
6291 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6292 *pGCPhysMem = NIL_RTGCPHYS;
6293 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6294 }
6295
6296 /* Executing non-executable memory? */
6297 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6298 && (fFlags & X86_PTE_PAE_NX)
6299 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6300 {
6301 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6302 *pGCPhysMem = NIL_RTGCPHYS;
6303 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6304 VERR_ACCESS_DENIED);
6305 }
6306 }
6307
6308 /*
6309 * Set the dirty / access flags.
6310 * ASSUMES this is set when the address is translated rather than on commit...
6311 */
6312 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6313 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6314 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6315 {
6316 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6317 AssertRC(rc2);
6318 }
6319
6320 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6321 *pGCPhysMem = GCPhys;
6322 return VINF_SUCCESS;
6323}
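

#if 0
/* Illustrative sketch, not part of the build: condensed form of the write
   permission rule applied above - a write to a page without X86_PTE_RW only
   goes through when the CPU is in ring 0 and CR0.WP is clear, otherwise a
   page fault is raised. The helper is hypothetical and only restates that rule. */
static bool iemPageWriteAllowedExample(bool fPteWritable, uint8_t uCpl, bool fCr0Wp)
{
    if (fPteWritable)
        return true;
    return uCpl == 0 && !fCr0Wp;        /* supervisor override unless write protection is on */
}
#endif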
6324
6325
6326
6327/**
6328 * Maps a physical page.
6329 *
6330 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6331 * @param pIemCpu The IEM per CPU data.
6332 * @param GCPhysMem The physical address.
6333 * @param fAccess The intended access.
6334 * @param ppvMem Where to return the mapping address.
6335 * @param pLock The PGM lock.
6336 */
6337IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6338{
6339#ifdef IEM_VERIFICATION_MODE_FULL
6340 /* Force the alternative path so we can ignore writes. */
6341 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6342 {
6343 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6344 {
6345 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6346 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6347 if (RT_FAILURE(rc2))
6348 pIemCpu->fProblematicMemory = true;
6349 }
6350 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6351 }
6352#endif
6353#ifdef IEM_LOG_MEMORY_WRITES
6354 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6355 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6356#endif
6357#ifdef IEM_VERIFICATION_MODE_MINIMAL
6358 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6359#endif
6360
6361 /** @todo This API may require some improving later. A private deal with PGM
6362 * regarding locking and unlocking needs to be struck. A couple of TLBs
6363 * living in PGM, but with publicly accessible inlined access methods
6364 * could perhaps be an even better solution. */
6365 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6366 GCPhysMem,
6367 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6368 pIemCpu->fBypassHandlers,
6369 ppvMem,
6370 pLock);
6371 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6372 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6373
6374#ifdef IEM_VERIFICATION_MODE_FULL
6375 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6376 pIemCpu->fProblematicMemory = true;
6377#endif
6378 return rc;
6379}
6380
6381
6382/**
6383 * Unmap a page previously mapped by iemMemPageMap.
6384 *
6385 * @param pIemCpu The IEM per CPU data.
6386 * @param GCPhysMem The physical address.
6387 * @param fAccess The intended access.
6388 * @param pvMem What iemMemPageMap returned.
6389 * @param pLock The PGM lock.
6390 */
6391DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6392{
6393 NOREF(pIemCpu);
6394 NOREF(GCPhysMem);
6395 NOREF(fAccess);
6396 NOREF(pvMem);
6397 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6398}
6399
6400
6401/**
6402 * Looks up a memory mapping entry.
6403 *
6404 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6405 * @param pIemCpu The IEM per CPU data.
6406 * @param pvMem The memory address.
6407 * @param fAccess The access type to look up.
6408 */
6409DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6410{
6411 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6412 if ( pIemCpu->aMemMappings[0].pv == pvMem
6413 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6414 return 0;
6415 if ( pIemCpu->aMemMappings[1].pv == pvMem
6416 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6417 return 1;
6418 if ( pIemCpu->aMemMappings[2].pv == pvMem
6419 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6420 return 2;
6421 return VERR_NOT_FOUND;
6422}
6423
6424
6425/**
6426 * Finds a free memmap entry when using iNextMapping doesn't work.
6427 *
6428 * @returns Memory mapping index, 1024 on failure.
6429 * @param pIemCpu The IEM per CPU data.
6430 */
6431IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6432{
6433 /*
6434 * The easy case.
6435 */
6436 if (pIemCpu->cActiveMappings == 0)
6437 {
6438 pIemCpu->iNextMapping = 1;
6439 return 0;
6440 }
6441
6442 /* There should be enough mappings for all instructions. */
6443 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6444
6445 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6446 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6447 return i;
6448
6449 AssertFailedReturn(1024);
6450}
6451
6452
6453/**
6454 * Commits a bounce buffer that needs writing back and unmaps it.
6455 *
6456 * @returns Strict VBox status code.
6457 * @param pIemCpu The IEM per CPU data.
6458 * @param iMemMap The index of the buffer to commit.
6459 */
6460IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6461{
6462 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6463 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6464
6465 /*
6466 * Do the writing.
6467 */
6468#ifndef IEM_VERIFICATION_MODE_MINIMAL
6469 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6470 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6471 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6472 {
6473 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6474 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6475 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6476 if (!pIemCpu->fBypassHandlers)
6477 {
6478 /*
6479 * Carefully and efficiently dealing with access handler return
6480 * codes makes this a little bloated.
6481 */
6482 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6483 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6484 pbBuf,
6485 cbFirst,
6486 PGMACCESSORIGIN_IEM);
6487 if (rcStrict == VINF_SUCCESS)
6488 {
6489 if (cbSecond)
6490 {
6491 rcStrict = PGMPhysWrite(pVM,
6492 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6493 pbBuf + cbFirst,
6494 cbSecond,
6495 PGMACCESSORIGIN_IEM);
6496 if (rcStrict == VINF_SUCCESS)
6497 { /* nothing */ }
6498 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6499 {
6500 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6501 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6502 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6503 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6504 }
6505 else
6506 {
6507 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6508 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6509 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6510 return rcStrict;
6511 }
6512 }
6513 }
6514 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6515 {
6516 if (!cbSecond)
6517 {
6518 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6519 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6520 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6521 }
6522 else
6523 {
6524 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6525 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6526 pbBuf + cbFirst,
6527 cbSecond,
6528 PGMACCESSORIGIN_IEM);
6529 if (rcStrict2 == VINF_SUCCESS)
6530 {
6531 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6532 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6533 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6534 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6535 }
6536 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6537 {
6538 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6539 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6540 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6541 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6542 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6543 }
6544 else
6545 {
6546 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6547 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6548 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6549 return rcStrict2;
6550 }
6551 }
6552 }
6553 else
6554 {
6555 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6556 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6557 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6558 return rcStrict;
6559 }
6560 }
6561 else
6562 {
6563 /*
6564 * No access handlers, much simpler.
6565 */
6566 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6567 if (RT_SUCCESS(rc))
6568 {
6569 if (cbSecond)
6570 {
6571 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6572 if (RT_SUCCESS(rc))
6573 { /* likely */ }
6574 else
6575 {
6576 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6577 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6578 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6579 return rc;
6580 }
6581 }
6582 }
6583 else
6584 {
6585 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6586 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6587 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6588 return rc;
6589 }
6590 }
6591 }
6592#endif
6593
6594#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6595 /*
6596 * Record the write(s).
6597 */
6598 if (!pIemCpu->fNoRem)
6599 {
6600 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6601 if (pEvtRec)
6602 {
6603 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6604 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6605 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6606 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6607 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6608 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6609 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6610 }
6611 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6612 {
6613 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6614 if (pEvtRec)
6615 {
6616 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6617 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6618 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6619 memcpy(pEvtRec->u.RamWrite.ab,
6620 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6621 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6622 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6623 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6624 }
6625 }
6626 }
6627#endif
6628#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6629 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6630 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6631 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6632 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6633 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6634 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6635
6636 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6637 g_cbIemWrote = cbWrote;
6638 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6639#endif
6640
6641 /*
6642 * Free the mapping entry.
6643 */
6644 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6645 Assert(pIemCpu->cActiveMappings != 0);
6646 pIemCpu->cActiveMappings--;
6647 return VINF_SUCCESS;
6648}
6649
6650
6651/**
6652 * iemMemMap worker that deals with a request crossing pages.
6653 */
6654IEM_STATIC VBOXSTRICTRC
6655iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6656{
6657 /*
6658 * Do the address translations.
6659 */
6660 RTGCPHYS GCPhysFirst;
6661 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6662 if (rcStrict != VINF_SUCCESS)
6663 return rcStrict;
6664
6665/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6666 * last byte. */
6667 RTGCPHYS GCPhysSecond;
6668 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6669 if (rcStrict != VINF_SUCCESS)
6670 return rcStrict;
6671 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6672
6673 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6674#ifdef IEM_VERIFICATION_MODE_FULL
6675 /*
6676 * Detect problematic memory when verifying so we can select
6677 * the right execution engine. (TLB: Redo this.)
6678 */
6679 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6680 {
6681 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6682 if (RT_SUCCESS(rc2))
6683 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6684 if (RT_FAILURE(rc2))
6685 pIemCpu->fProblematicMemory = true;
6686 }
6687#endif
6688
6689
6690 /*
6691 * Read in the current memory content if it's a read, execute or partial
6692 * write access.
6693 */
6694 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6695 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6696 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6697
6698 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6699 {
6700 if (!pIemCpu->fBypassHandlers)
6701 {
6702 /*
6703 * Must carefully deal with access handler status codes here,
6704 * which makes the code a bit bloated.
6705 */
6706 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6707 if (rcStrict == VINF_SUCCESS)
6708 {
6709 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6710 if (rcStrict == VINF_SUCCESS)
6711 { /*likely */ }
6712 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6713 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6714 else
6715 {
6716 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6717 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6718 return rcStrict;
6719 }
6720 }
6721 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6722 {
6723 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6724 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6725 {
6726 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6727 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6728 }
6729 else
6730 {
6731 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6732                      GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6733 return rcStrict2;
6734 }
6735 }
6736 else
6737 {
6738 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6739 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6740 return rcStrict;
6741 }
6742 }
6743 else
6744 {
6745 /*
6746 * No informational status codes here, much more straightforward.
6747 */
6748 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6749 if (RT_SUCCESS(rc))
6750 {
6751 Assert(rc == VINF_SUCCESS);
6752 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6753 if (RT_SUCCESS(rc))
6754 Assert(rc == VINF_SUCCESS);
6755 else
6756 {
6757 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6758 return rc;
6759 }
6760 }
6761 else
6762 {
6763 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6764 return rc;
6765 }
6766 }
6767
6768#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6769 if ( !pIemCpu->fNoRem
6770 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6771 {
6772 /*
6773 * Record the reads.
6774 */
6775 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6776 if (pEvtRec)
6777 {
6778 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6779 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6780 pEvtRec->u.RamRead.cb = cbFirstPage;
6781 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6782 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6783 }
6784 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6785 if (pEvtRec)
6786 {
6787 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6788 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6789 pEvtRec->u.RamRead.cb = cbSecondPage;
6790 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6791 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6792 }
6793 }
6794#endif
6795 }
6796#ifdef VBOX_STRICT
6797 else
6798 memset(pbBuf, 0xcc, cbMem);
6799 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6800 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6801#endif
6802
6803 /*
6804 * Commit the bounce buffer entry.
6805 */
6806 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6807 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6808 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6809 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6810 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6811 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6812 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6813 pIemCpu->iNextMapping = iMemMap + 1;
6814 pIemCpu->cActiveMappings++;
6815
6816 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6817 *ppvMem = pbBuf;
6818 return VINF_SUCCESS;
6819}
6820
6821
6822/**
6823 * iemMemMap worker that deals with iemMemPageMap failures.
6824 */
6825IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6826 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6827{
6828 /*
6829 * Filter out conditions we can handle and the ones which shouldn't happen.
6830 */
6831 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6832 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6833 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6834 {
6835 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6836 return rcMap;
6837 }
6838 pIemCpu->cPotentialExits++;
6839
6840 /*
6841 * Read in the current memory content if it's a read, execute or partial
6842 * write access.
6843 */
6844 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6845 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6846 {
6847 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6848 memset(pbBuf, 0xff, cbMem);
6849 else
6850 {
6851 int rc;
6852 if (!pIemCpu->fBypassHandlers)
6853 {
6854 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6855 if (rcStrict == VINF_SUCCESS)
6856 { /* nothing */ }
6857 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6858 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6859 else
6860 {
6861 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6862 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6863 return rcStrict;
6864 }
6865 }
6866 else
6867 {
6868 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6869 if (RT_SUCCESS(rc))
6870 { /* likely */ }
6871 else
6872 {
6873 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6874 GCPhysFirst, rc));
6875 return rc;
6876 }
6877 }
6878 }
6879
6880#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6881 if ( !pIemCpu->fNoRem
6882 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6883 {
6884 /*
6885 * Record the read.
6886 */
6887 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6888 if (pEvtRec)
6889 {
6890 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6891 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6892 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6893 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6894 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6895 }
6896 }
6897#endif
6898 }
6899#ifdef VBOX_STRICT
6900 else
6901 memset(pbBuf, 0xcc, cbMem);
6902#endif
6903#ifdef VBOX_STRICT
6904 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6905 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6906#endif
6907
6908 /*
6909 * Commit the bounce buffer entry.
6910 */
6911 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6912 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6913 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6914 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6915 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6916 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6917 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6918 pIemCpu->iNextMapping = iMemMap + 1;
6919 pIemCpu->cActiveMappings++;
6920
6921 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6922 *ppvMem = pbBuf;
6923 return VINF_SUCCESS;
6924}
6925
6926
6927
6928/**
6929 * Maps the specified guest memory for the given kind of access.
6930 *
6931 * This may be using bounce buffering of the memory if it's crossing a page
6932 * boundary or if there is an access handler installed for any of it. Because
6933 * of lock prefix guarantees, we're in for some extra clutter when this
6934 * happens.
6935 *
6936 * This may raise a \#GP, \#SS, \#PF or \#AC.
6937 *
6938 * @returns VBox strict status code.
6939 *
6940 * @param pIemCpu The IEM per CPU data.
6941 * @param ppvMem Where to return the pointer to the mapped
6942 * memory.
6943 * @param cbMem The number of bytes to map. This is usually 1,
6944 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6945 * string operations it can be up to a page.
6946 * @param iSegReg The index of the segment register to use for
6947 * this access. The base and limits are checked.
6948 * Use UINT8_MAX to indicate that no segmentation
6949 * is required (for IDT, GDT and LDT accesses).
6950 * @param GCPtrMem The address of the guest memory.
6951 * @param fAccess How the memory is being accessed. The
6952 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6953 * how to map the memory, while the
6954 * IEM_ACCESS_WHAT_XXX bit is used when raising
6955 * exceptions.
6956 */
6957IEM_STATIC VBOXSTRICTRC
6958iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6959{
6960 /*
6961 * Check the input and figure out which mapping entry to use.
6962 */
6963 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6964 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6965
6966 unsigned iMemMap = pIemCpu->iNextMapping;
6967 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6968 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6969 {
6970 iMemMap = iemMemMapFindFree(pIemCpu);
6971 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
6972 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
6973 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
6974 pIemCpu->aMemMappings[2].fAccess),
6975 VERR_IEM_IPE_9);
6976 }
6977
6978 /*
6979 * Map the memory, checking that we can actually access it. If something
6980 * slightly complicated happens, fall back on bounce buffering.
6981 */
6982 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6983 if (rcStrict != VINF_SUCCESS)
6984 return rcStrict;
6985
6986 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6987 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6988
6989 RTGCPHYS GCPhysFirst;
6990 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6991 if (rcStrict != VINF_SUCCESS)
6992 return rcStrict;
6993
6994 void *pvMem;
6995 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6996 if (rcStrict != VINF_SUCCESS)
6997 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6998
6999 /*
7000 * Fill in the mapping table entry.
7001 */
7002 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7003 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7004 pIemCpu->iNextMapping = iMemMap + 1;
7005 pIemCpu->cActiveMappings++;
7006
7007 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7008 *ppvMem = pvMem;
7009 return VINF_SUCCESS;
7010}
7011
7012
7013/**
7014 * Commits the guest memory if bounce buffered and unmaps it.
7015 *
7016 * @returns Strict VBox status code.
7017 * @param pIemCpu The IEM per CPU data.
7018 * @param pvMem The mapping.
7019 * @param fAccess The kind of access.
7020 */
7021IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7022{
7023 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7024 AssertReturn(iMemMap >= 0, iMemMap);
7025
7026 /* If it's bounce buffered, we may need to write back the buffer. */
7027 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7028 {
7029 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7030 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7031 }
7032 /* Otherwise unlock it. */
7033 else
7034 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7035
7036 /* Free the entry. */
7037 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7038 Assert(pIemCpu->cActiveMappings != 0);
7039 pIemCpu->cActiveMappings--;
7040 return VINF_SUCCESS;
7041}
7042
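/*
 * Illustrative sketch only (not part of the original file): roughly how an
 * instruction implementation pairs iemMemMap with iemMemCommitAndUnmap for a
 * read-modify-write operand.  GCPtrEffDst is a hypothetical effective address
 * and IEM_ACCESS_DATA_RW is assumed by analogy with the IEM_ACCESS_STACK_RW
 * flag used further down; for a bounce buffered mapping the actual guest
 * write only happens in the commit call.
 *
 *     uint16_t     *pu16Dst;
 *     VBOXSTRICTRC  rcStrict = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                        X86_SREG_DS, GCPtrEffDst, IEM_ACCESS_DATA_RW);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu16Dst |= 1;  // modify the mapped (or bounce buffered) word
 *         rcStrict = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_RW);
 *     }
 *     return rcStrict;
 */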
7043
7044/**
7045 * Rolls back mappings, releasing page locks and such.
7046 *
7047 * The caller shall only call this after checking cActiveMappings.
7048 *
7050 * @param pIemCpu The IEM per CPU data.
7051 */
7052IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7053{
7054 Assert(pIemCpu->cActiveMappings > 0);
7055
7056 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7057 while (iMemMap-- > 0)
7058 {
7059 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7060 if (fAccess != IEM_ACCESS_INVALID)
7061 {
7062 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7063 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7064 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7065 Assert(pIemCpu->cActiveMappings > 0);
7066 pIemCpu->cActiveMappings--;
7067 }
7068 }
7069}
7070
7071
7072/**
7073 * Fetches a data byte.
7074 *
7075 * @returns Strict VBox status code.
7076 * @param pIemCpu The IEM per CPU data.
7077 * @param pu8Dst Where to return the byte.
7078 * @param iSegReg The index of the segment register to use for
7079 * this access. The base and limits are checked.
7080 * @param GCPtrMem The address of the guest memory.
7081 */
7082IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7083{
7084 /* The lazy approach for now... */
7085 uint8_t const *pu8Src;
7086 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7087 if (rc == VINF_SUCCESS)
7088 {
7089 *pu8Dst = *pu8Src;
7090 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7091 }
7092 return rc;
7093}
7094
7095
7096/**
7097 * Fetches a data word.
7098 *
7099 * @returns Strict VBox status code.
7100 * @param pIemCpu The IEM per CPU data.
7101 * @param pu16Dst Where to return the word.
7102 * @param iSegReg The index of the segment register to use for
7103 * this access. The base and limits are checked.
7104 * @param GCPtrMem The address of the guest memory.
7105 */
7106IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7107{
7108 /* The lazy approach for now... */
7109 uint16_t const *pu16Src;
7110 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7111 if (rc == VINF_SUCCESS)
7112 {
7113 *pu16Dst = *pu16Src;
7114 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7115 }
7116 return rc;
7117}
7118
7119
7120/**
7121 * Fetches a data dword.
7122 *
7123 * @returns Strict VBox status code.
7124 * @param pIemCpu The IEM per CPU data.
7125 * @param pu32Dst Where to return the dword.
7126 * @param iSegReg The index of the segment register to use for
7127 * this access. The base and limits are checked.
7128 * @param GCPtrMem The address of the guest memory.
7129 */
7130IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7131{
7132 /* The lazy approach for now... */
7133 uint32_t const *pu32Src;
7134 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7135 if (rc == VINF_SUCCESS)
7136 {
7137 *pu32Dst = *pu32Src;
7138 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7139 }
7140 return rc;
7141}
7142
7143
7144#ifdef SOME_UNUSED_FUNCTION
7145/**
7146 * Fetches a data dword and sign extends it to a qword.
7147 *
7148 * @returns Strict VBox status code.
7149 * @param pIemCpu The IEM per CPU data.
7150 * @param pu64Dst Where to return the sign extended value.
7151 * @param iSegReg The index of the segment register to use for
7152 * this access. The base and limits are checked.
7153 * @param GCPtrMem The address of the guest memory.
7154 */
7155IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7156{
7157 /* The lazy approach for now... */
7158 int32_t const *pi32Src;
7159 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7160 if (rc == VINF_SUCCESS)
7161 {
7162 *pu64Dst = *pi32Src;
7163 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7164 }
7165#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7166 else
7167 *pu64Dst = 0;
7168#endif
7169 return rc;
7170}
7171#endif
7172
7173
7174/**
7175 * Fetches a data qword.
7176 *
7177 * @returns Strict VBox status code.
7178 * @param pIemCpu The IEM per CPU data.
7179 * @param pu64Dst Where to return the qword.
7180 * @param iSegReg The index of the segment register to use for
7181 * this access. The base and limits are checked.
7182 * @param GCPtrMem The address of the guest memory.
7183 */
7184IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7185{
7186 /* The lazy approach for now... */
7187 uint64_t const *pu64Src;
7188 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7189 if (rc == VINF_SUCCESS)
7190 {
7191 *pu64Dst = *pu64Src;
7192 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7193 }
7194 return rc;
7195}
7196
7197
7198/**
7199 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7200 *
7201 * @returns Strict VBox status code.
7202 * @param pIemCpu The IEM per CPU data.
7203 * @param pu64Dst Where to return the qword.
7204 * @param iSegReg The index of the segment register to use for
7205 * this access. The base and limits are checked.
7206 * @param GCPtrMem The address of the guest memory.
7207 */
7208IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7209{
7210 /* The lazy approach for now... */
7211 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7212 if (RT_UNLIKELY(GCPtrMem & 15))
7213 return iemRaiseGeneralProtectionFault0(pIemCpu);
7214
7215 uint64_t const *pu64Src;
7216 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7217 if (rc == VINF_SUCCESS)
7218 {
7219 *pu64Dst = *pu64Src;
7220 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7221 }
7222 return rc;
7223}
7224
7225
7226/**
7227 * Fetches a data tword.
7228 *
7229 * @returns Strict VBox status code.
7230 * @param pIemCpu The IEM per CPU data.
7231 * @param pr80Dst Where to return the tword.
7232 * @param iSegReg The index of the segment register to use for
7233 * this access. The base and limits are checked.
7234 * @param GCPtrMem The address of the guest memory.
7235 */
7236IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7237{
7238 /* The lazy approach for now... */
7239 PCRTFLOAT80U pr80Src;
7240 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7241 if (rc == VINF_SUCCESS)
7242 {
7243 *pr80Dst = *pr80Src;
7244 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7245 }
7246 return rc;
7247}
7248
7249
7250/**
7251 * Fetches a data dqword (double qword), generally SSE related.
7252 *
7253 * @returns Strict VBox status code.
7254 * @param pIemCpu The IEM per CPU data.
7255 * @param pu128Dst Where to return the dqword.
7256 * @param iSegReg The index of the segment register to use for
7257 * this access. The base and limits are checked.
7258 * @param GCPtrMem The address of the guest memory.
7259 */
7260IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7261{
7262 /* The lazy approach for now... */
7263 uint128_t const *pu128Src;
7264 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7265 if (rc == VINF_SUCCESS)
7266 {
7267 *pu128Dst = *pu128Src;
7268 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7269 }
7270 return rc;
7271}
7272
7273
7274/**
7275 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7276 * related.
7277 *
7278 * Raises \#GP(0) if not aligned.
7279 *
7280 * @returns Strict VBox status code.
7281 * @param pIemCpu The IEM per CPU data.
7282 * @param pu128Dst Where to return the dqword.
7283 * @param iSegReg The index of the segment register to use for
7284 * this access. The base and limits are checked.
7285 * @param GCPtrMem The address of the guest memory.
7286 */
7287IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7288{
7289 /* The lazy approach for now... */
7290 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7291 if ( (GCPtrMem & 15)
7292 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7293 return iemRaiseGeneralProtectionFault0(pIemCpu);
7294
7295 uint128_t const *pu128Src;
7296 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7297 if (rc == VINF_SUCCESS)
7298 {
7299 *pu128Dst = *pu128Src;
7300 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7301 }
7302 return rc;
7303}
7304
7305
7306
7307
7308/**
7309 * Fetches a descriptor register (lgdt, lidt).
7310 *
7311 * @returns Strict VBox status code.
7312 * @param pIemCpu The IEM per CPU data.
7313 * @param pcbLimit Where to return the limit.
7314 * @param pGCPtrBase Where to return the base.
7315 * @param iSegReg The index of the segment register to use for
7316 * this access. The base and limits are checked.
7317 * @param GCPtrMem The address of the guest memory.
7318 * @param enmOpSize The effective operand size.
7319 */
7320IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7321 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7322{
7323 uint8_t const *pu8Src;
7324 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7325 (void **)&pu8Src,
7326 enmOpSize == IEMMODE_64BIT
7327 ? 2 + 8
7328 : enmOpSize == IEMMODE_32BIT
7329 ? 2 + 4
7330 : 2 + 3,
7331 iSegReg,
7332 GCPtrMem,
7333 IEM_ACCESS_DATA_R);
7334 if (rcStrict == VINF_SUCCESS)
7335 {
7336 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7337 switch (enmOpSize)
7338 {
7339 case IEMMODE_16BIT:
7340 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7341 break;
7342 case IEMMODE_32BIT:
7343 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7344 break;
7345 case IEMMODE_64BIT:
7346 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7347 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7348 break;
7349
7350 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7351 }
7352 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7353 }
7354 return rcStrict;
7355}
7356
7357
7358
7359/**
7360 * Stores a data byte.
7361 *
7362 * @returns Strict VBox status code.
7363 * @param pIemCpu The IEM per CPU data.
7364 * @param iSegReg The index of the segment register to use for
7365 * this access. The base and limits are checked.
7366 * @param GCPtrMem The address of the guest memory.
7367 * @param u8Value The value to store.
7368 */
7369IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7370{
7371 /* The lazy approach for now... */
7372 uint8_t *pu8Dst;
7373 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7374 if (rc == VINF_SUCCESS)
7375 {
7376 *pu8Dst = u8Value;
7377 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7378 }
7379 return rc;
7380}
7381
7382
7383/**
7384 * Stores a data word.
7385 *
7386 * @returns Strict VBox status code.
7387 * @param pIemCpu The IEM per CPU data.
7388 * @param iSegReg The index of the segment register to use for
7389 * this access. The base and limits are checked.
7390 * @param GCPtrMem The address of the guest memory.
7391 * @param u16Value The value to store.
7392 */
7393IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7394{
7395 /* The lazy approach for now... */
7396 uint16_t *pu16Dst;
7397 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7398 if (rc == VINF_SUCCESS)
7399 {
7400 *pu16Dst = u16Value;
7401 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7402 }
7403 return rc;
7404}
7405
7406
7407/**
7408 * Stores a data dword.
7409 *
7410 * @returns Strict VBox status code.
7411 * @param pIemCpu The IEM per CPU data.
7412 * @param iSegReg The index of the segment register to use for
7413 * this access. The base and limits are checked.
7414 * @param GCPtrMem The address of the guest memory.
7415 * @param u32Value The value to store.
7416 */
7417IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7418{
7419 /* The lazy approach for now... */
7420 uint32_t *pu32Dst;
7421 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7422 if (rc == VINF_SUCCESS)
7423 {
7424 *pu32Dst = u32Value;
7425 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7426 }
7427 return rc;
7428}
7429
7430
7431/**
7432 * Stores a data qword.
7433 *
7434 * @returns Strict VBox status code.
7435 * @param pIemCpu The IEM per CPU data.
7436 * @param iSegReg The index of the segment register to use for
7437 * this access. The base and limits are checked.
7438 * @param GCPtrMem The address of the guest memory.
7439 * @param u64Value The value to store.
7440 */
7441IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7442{
7443 /* The lazy approach for now... */
7444 uint64_t *pu64Dst;
7445 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7446 if (rc == VINF_SUCCESS)
7447 {
7448 *pu64Dst = u64Value;
7449 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7450 }
7451 return rc;
7452}
7453
7454
7455/**
7456 * Stores a data dqword.
7457 *
7458 * @returns Strict VBox status code.
7459 * @param pIemCpu The IEM per CPU data.
7460 * @param iSegReg The index of the segment register to use for
7461 * this access. The base and limits are checked.
7462 * @param GCPtrMem The address of the guest memory.
7463 * @param u128Value The value to store.
7464 */
7465IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7466{
7467 /* The lazy approach for now... */
7468 uint128_t *pu128Dst;
7469 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7470 if (rc == VINF_SUCCESS)
7471 {
7472 *pu128Dst = u128Value;
7473 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7474 }
7475 return rc;
7476}
7477
7478
7479/**
7480 * Stores a data dqword, SSE aligned.
7481 *
7482 * @returns Strict VBox status code.
7483 * @param pIemCpu The IEM per CPU data.
7484 * @param iSegReg The index of the segment register to use for
7485 * this access. The base and limits are checked.
7486 * @param GCPtrMem The address of the guest memory.
7487 * @param u128Value The value to store.
7488 */
7489IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7490{
7491 /* The lazy approach for now... */
7492 if ( (GCPtrMem & 15)
7493 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7494 return iemRaiseGeneralProtectionFault0(pIemCpu);
7495
7496 uint128_t *pu128Dst;
7497 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7498 if (rc == VINF_SUCCESS)
7499 {
7500 *pu128Dst = u128Value;
7501 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7502 }
7503 return rc;
7504}
7505
7506
7507/**
7508 * Stores a descriptor register (sgdt, sidt).
7509 *
7510 * @returns Strict VBox status code.
7511 * @param pIemCpu The IEM per CPU data.
7512 * @param cbLimit The limit.
7513 * @param GCPtrBase The base address.
7514 * @param iSegReg The index of the segment register to use for
7515 * this access. The base and limits are checked.
7516 * @param GCPtrMem The address of the guest memory.
7517 * @param enmOpSize The effective operand size.
7518 */
7519IEM_STATIC VBOXSTRICTRC
7520iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7521{
7522 uint8_t *pu8Src;
7523 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7524 (void **)&pu8Src,
7525 enmOpSize == IEMMODE_64BIT
7526 ? 2 + 8
7527 : enmOpSize == IEMMODE_32BIT
7528 ? 2 + 4
7529 : 2 + 3,
7530 iSegReg,
7531 GCPtrMem,
7532 IEM_ACCESS_DATA_W);
7533 if (rcStrict == VINF_SUCCESS)
7534 {
7535 pu8Src[0] = RT_BYTE1(cbLimit);
7536 pu8Src[1] = RT_BYTE2(cbLimit);
7537 pu8Src[2] = RT_BYTE1(GCPtrBase);
7538 pu8Src[3] = RT_BYTE2(GCPtrBase);
7539 pu8Src[4] = RT_BYTE3(GCPtrBase);
7540 if (enmOpSize == IEMMODE_16BIT)
7541 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7542 else
7543 {
7544 pu8Src[5] = RT_BYTE4(GCPtrBase);
7545 if (enmOpSize == IEMMODE_64BIT)
7546 {
7547 pu8Src[6] = RT_BYTE5(GCPtrBase);
7548 pu8Src[7] = RT_BYTE6(GCPtrBase);
7549 pu8Src[8] = RT_BYTE7(GCPtrBase);
7550 pu8Src[9] = RT_BYTE8(GCPtrBase);
7551 }
7552 }
7553 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7554 }
7555 return rcStrict;
7556}
7557
7558
7559/**
7560 * Pushes a word onto the stack.
7561 *
7562 * @returns Strict VBox status code.
7563 * @param pIemCpu The IEM per CPU data.
7564 * @param u16Value The value to push.
7565 */
7566IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7567{
7568    /* Decrement the stack pointer. */
7569 uint64_t uNewRsp;
7570 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7571 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7572
7573 /* Write the word the lazy way. */
7574 uint16_t *pu16Dst;
7575 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7576 if (rc == VINF_SUCCESS)
7577 {
7578 *pu16Dst = u16Value;
7579 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7580 }
7581
7582    /* Commit the new RSP value unless an access handler made trouble. */
7583 if (rc == VINF_SUCCESS)
7584 pCtx->rsp = uNewRsp;
7585
7586 return rc;
7587}
7588
7589
7590/**
7591 * Pushes a dword onto the stack.
7592 *
7593 * @returns Strict VBox status code.
7594 * @param pIemCpu The IEM per CPU data.
7595 * @param u32Value The value to push.
7596 */
7597IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7598{
7599    /* Decrement the stack pointer. */
7600 uint64_t uNewRsp;
7601 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7602 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7603
7604 /* Write the dword the lazy way. */
7605 uint32_t *pu32Dst;
7606 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7607 if (rc == VINF_SUCCESS)
7608 {
7609 *pu32Dst = u32Value;
7610 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7611 }
7612
7613    /* Commit the new RSP value unless an access handler made trouble. */
7614 if (rc == VINF_SUCCESS)
7615 pCtx->rsp = uNewRsp;
7616
7617 return rc;
7618}
7619
7620
7621/**
7622 * Pushes a dword segment register value onto the stack.
7623 *
7624 * @returns Strict VBox status code.
7625 * @param pIemCpu The IEM per CPU data.
7626 * @param u32Value The value to push.
7627 */
7628IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7629{
7630    /* Decrement the stack pointer. */
7631 uint64_t uNewRsp;
7632 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7633 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7634
7635 VBOXSTRICTRC rc;
7636 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7637 {
7638 /* The recompiler writes a full dword. */
7639 uint32_t *pu32Dst;
7640 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7641 if (rc == VINF_SUCCESS)
7642 {
7643 *pu32Dst = u32Value;
7644 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7645 }
7646 }
7647 else
7648 {
7649        /* The Intel docs talk about zero extending the selector register
7650           value. My actual Intel CPU here might be zero extending the value,
7651           but it still only writes the lower word... */
7652 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7653 * happens when crossing an electric page boundary, is the high word
7654 * checked for write accessibility or not? Probably it is. What about
7655 * segment limits? */
7656 uint16_t *pu16Dst;
7657 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7658 if (rc == VINF_SUCCESS)
7659 {
7660 *pu16Dst = (uint16_t)u32Value;
7661 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7662 }
7663 }
7664
7665    /* Commit the new RSP value unless an access handler made trouble. */
7666 if (rc == VINF_SUCCESS)
7667 pCtx->rsp = uNewRsp;
7668
7669 return rc;
7670}
7671
7672
7673/**
7674 * Pushes a qword onto the stack.
7675 *
7676 * @returns Strict VBox status code.
7677 * @param pIemCpu The IEM per CPU data.
7678 * @param u64Value The value to push.
7679 */
7680IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7681{
7682    /* Decrement the stack pointer. */
7683 uint64_t uNewRsp;
7684 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7685 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7686
7687    /* Write the qword the lazy way. */
7688 uint64_t *pu64Dst;
7689 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7690 if (rc == VINF_SUCCESS)
7691 {
7692 *pu64Dst = u64Value;
7693 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7694 }
7695
7696    /* Commit the new RSP value unless an access handler made trouble. */
7697 if (rc == VINF_SUCCESS)
7698 pCtx->rsp = uNewRsp;
7699
7700 return rc;
7701}
7702
7703
7704/**
7705 * Pops a word from the stack.
7706 *
7707 * @returns Strict VBox status code.
7708 * @param pIemCpu The IEM per CPU data.
7709 * @param pu16Value Where to store the popped value.
7710 */
7711IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7712{
7713 /* Increment the stack pointer. */
7714 uint64_t uNewRsp;
7715 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7716 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7717
7718    /* Read the word the lazy way. */
7719 uint16_t const *pu16Src;
7720 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7721 if (rc == VINF_SUCCESS)
7722 {
7723 *pu16Value = *pu16Src;
7724 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7725
7726 /* Commit the new RSP value. */
7727 if (rc == VINF_SUCCESS)
7728 pCtx->rsp = uNewRsp;
7729 }
7730
7731 return rc;
7732}
7733
7734
7735/**
7736 * Pops a dword from the stack.
7737 *
7738 * @returns Strict VBox status code.
7739 * @param pIemCpu The IEM per CPU data.
7740 * @param pu32Value Where to store the popped value.
7741 */
7742IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7743{
7744 /* Increment the stack pointer. */
7745 uint64_t uNewRsp;
7746 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7747 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7748
7749    /* Read the dword the lazy way. */
7750 uint32_t const *pu32Src;
7751 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7752 if (rc == VINF_SUCCESS)
7753 {
7754 *pu32Value = *pu32Src;
7755 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7756
7757 /* Commit the new RSP value. */
7758 if (rc == VINF_SUCCESS)
7759 pCtx->rsp = uNewRsp;
7760 }
7761
7762 return rc;
7763}
7764
7765
7766/**
7767 * Pops a qword from the stack.
7768 *
7769 * @returns Strict VBox status code.
7770 * @param pIemCpu The IEM per CPU data.
7771 * @param pu64Value Where to store the popped value.
7772 */
7773IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7774{
7775 /* Increment the stack pointer. */
7776 uint64_t uNewRsp;
7777 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7778 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7779
7780    /* Read the qword the lazy way. */
7781 uint64_t const *pu64Src;
7782 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7783 if (rc == VINF_SUCCESS)
7784 {
7785 *pu64Value = *pu64Src;
7786 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7787
7788 /* Commit the new RSP value. */
7789 if (rc == VINF_SUCCESS)
7790 pCtx->rsp = uNewRsp;
7791 }
7792
7793 return rc;
7794}
7795
7796
7797/**
7798 * Pushes a word onto the stack, using a temporary stack pointer.
7799 *
7800 * @returns Strict VBox status code.
7801 * @param pIemCpu The IEM per CPU data.
7802 * @param u16Value The value to push.
7803 * @param pTmpRsp Pointer to the temporary stack pointer.
7804 */
7805IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7806{
7807    /* Decrement the stack pointer. */
7808 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7809 RTUINT64U NewRsp = *pTmpRsp;
7810 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7811
7812 /* Write the word the lazy way. */
7813 uint16_t *pu16Dst;
7814 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7815 if (rc == VINF_SUCCESS)
7816 {
7817 *pu16Dst = u16Value;
7818 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7819 }
7820
7821    /* Commit the new RSP value unless an access handler made trouble. */
7822 if (rc == VINF_SUCCESS)
7823 *pTmpRsp = NewRsp;
7824
7825 return rc;
7826}
7827
7828
7829/**
7830 * Pushes a dword onto the stack, using a temporary stack pointer.
7831 *
7832 * @returns Strict VBox status code.
7833 * @param pIemCpu The IEM per CPU data.
7834 * @param u32Value The value to push.
7835 * @param pTmpRsp Pointer to the temporary stack pointer.
7836 */
7837IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7838{
7839    /* Decrement the stack pointer. */
7840 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7841 RTUINT64U NewRsp = *pTmpRsp;
7842 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7843
7844    /* Write the dword the lazy way. */
7845 uint32_t *pu32Dst;
7846 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7847 if (rc == VINF_SUCCESS)
7848 {
7849 *pu32Dst = u32Value;
7850 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7851 }
7852
7853    /* Commit the new RSP value unless an access handler made trouble. */
7854 if (rc == VINF_SUCCESS)
7855 *pTmpRsp = NewRsp;
7856
7857 return rc;
7858}
7859
7860
7861/**
7862 * Pushes a qword onto the stack, using a temporary stack pointer.
7863 *
7864 * @returns Strict VBox status code.
7865 * @param pIemCpu The IEM per CPU data.
7866 * @param u64Value The value to push.
7867 * @param pTmpRsp Pointer to the temporary stack pointer.
7868 */
7869IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7870{
7871    /* Decrement the stack pointer. */
7872 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7873 RTUINT64U NewRsp = *pTmpRsp;
7874 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7875
7876    /* Write the qword the lazy way. */
7877 uint64_t *pu64Dst;
7878 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7879 if (rc == VINF_SUCCESS)
7880 {
7881 *pu64Dst = u64Value;
7882 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7883 }
7884
7885    /* Commit the new RSP value unless an access handler made trouble. */
7886 if (rc == VINF_SUCCESS)
7887 *pTmpRsp = NewRsp;
7888
7889 return rc;
7890}
7891
7892
7893/**
7894 * Pops a word from the stack, using a temporary stack pointer.
7895 *
7896 * @returns Strict VBox status code.
7897 * @param pIemCpu The IEM per CPU data.
7898 * @param pu16Value Where to store the popped value.
7899 * @param pTmpRsp Pointer to the temporary stack pointer.
7900 */
7901IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7902{
7903 /* Increment the stack pointer. */
7904 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7905 RTUINT64U NewRsp = *pTmpRsp;
7906 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7907
7908    /* Read the word the lazy way. */
7909 uint16_t const *pu16Src;
7910 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7911 if (rc == VINF_SUCCESS)
7912 {
7913 *pu16Value = *pu16Src;
7914 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7915
7916 /* Commit the new RSP value. */
7917 if (rc == VINF_SUCCESS)
7918 *pTmpRsp = NewRsp;
7919 }
7920
7921 return rc;
7922}
7923
7924
7925/**
7926 * Pops a dword from the stack, using a temporary stack pointer.
7927 *
7928 * @returns Strict VBox status code.
7929 * @param pIemCpu The IEM per CPU data.
7930 * @param pu32Value Where to store the popped value.
7931 * @param pTmpRsp Pointer to the temporary stack pointer.
7932 */
7933IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7934{
7935 /* Increment the stack pointer. */
7936 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7937 RTUINT64U NewRsp = *pTmpRsp;
7938 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7939
7940    /* Read the dword the lazy way. */
7941 uint32_t const *pu32Src;
7942 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7943 if (rc == VINF_SUCCESS)
7944 {
7945 *pu32Value = *pu32Src;
7946 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7947
7948 /* Commit the new RSP value. */
7949 if (rc == VINF_SUCCESS)
7950 *pTmpRsp = NewRsp;
7951 }
7952
7953 return rc;
7954}
7955
7956
7957/**
7958 * Pops a qword from the stack, using a temporary stack pointer.
7959 *
7960 * @returns Strict VBox status code.
7961 * @param pIemCpu The IEM per CPU data.
7962 * @param pu64Value Where to store the popped value.
7963 * @param pTmpRsp Pointer to the temporary stack pointer.
7964 */
7965IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7966{
7967 /* Increment the stack pointer. */
7968 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7969 RTUINT64U NewRsp = *pTmpRsp;
7970 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7971
7972    /* Read the qword the lazy way. */
7973 uint64_t const *pu64Src;
7974 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7975 if (rcStrict == VINF_SUCCESS)
7976 {
7977 *pu64Value = *pu64Src;
7978 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7979
7980 /* Commit the new RSP value. */
7981 if (rcStrict == VINF_SUCCESS)
7982 *pTmpRsp = NewRsp;
7983 }
7984
7985 return rcStrict;
7986}
7987
7988
7989/**
7990 * Begin a special stack push (used by interrupts, exceptions and such).
7991 *
7992 * This will raise \#SS or \#PF if appropriate.
7993 *
7994 * @returns Strict VBox status code.
7995 * @param pIemCpu The IEM per CPU data.
7996 * @param cbMem The number of bytes to push onto the stack.
7997 * @param ppvMem Where to return the pointer to the stack memory.
7998 * As with the other memory functions this could be
7999 * direct access or bounce buffered access, so
8000 * don't commit register until the commit call
8001 * succeeds.
8002 * @param puNewRsp Where to return the new RSP value. This must be
8003 * passed unchanged to
8004 * iemMemStackPushCommitSpecial().
8005 */
8006IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8007{
8008 Assert(cbMem < UINT8_MAX);
8009 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8010 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8011 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8012}
8013
8014
8015/**
8016 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8017 *
8018 * This will update the rSP.
8019 *
8020 * @returns Strict VBox status code.
8021 * @param pIemCpu The IEM per CPU data.
8022 * @param pvMem The pointer returned by
8023 * iemMemStackPushBeginSpecial().
8024 * @param uNewRsp The new RSP value returned by
8025 * iemMemStackPushBeginSpecial().
8026 */
8027IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8028{
8029 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8030 if (rcStrict == VINF_SUCCESS)
8031 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8032 return rcStrict;
8033}
8034
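/*
 * Usage sketch (hypothetical caller code; uEip, uSelCs and uEfl are made-up
 * locals): the begin/commit pair is meant for exception and interrupt frame
 * pushes where several values are written before RSP is committed.
 *
 *      uint32_t    *pu32Frame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 12, (void **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu32Frame[2] = uEfl;
 *      pu32Frame[1] = uSelCs;
 *      pu32Frame[0] = uEip;
 *      rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu32Frame, uNewRsp);
 */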
8035
8036/**
8037 * Begin a special stack pop (used by iret, retf and such).
8038 *
8039 * This will raise \#SS or \#PF if appropriate.
8040 *
8041 * @returns Strict VBox status code.
8042 * @param pIemCpu The IEM per CPU data.
8043 * @param cbMem The number of bytes to pop off the stack.
8044 * @param ppvMem Where to return the pointer to the stack memory.
8045 * @param puNewRsp Where to return the new RSP value. This must be
8046 * passed unchanged to
8047 * iemMemStackPopCommitSpecial() or applied
8048 * manually if iemMemStackPopDoneSpecial() is used.
8049 */
8050IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8051{
8052 Assert(cbMem < UINT8_MAX);
8053 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8054 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8055 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8056}
8057
8058
8059/**
8060 * Continue a special stack pop (used by iret and retf).
8061 *
8062 * This will raise \#SS or \#PF if appropriate.
8063 *
8064 * @returns Strict VBox status code.
8065 * @param pIemCpu The IEM per CPU data.
8066 * @param cbMem The number of bytes to pop off the stack.
8067 * @param ppvMem Where to return the pointer to the stack memory.
8068 * @param puNewRsp Where to return the new RSP value. This must be
8069 * passed unchanged to
8070 * iemMemStackPopCommitSpecial() or applied
8071 * manually if iemMemStackPopDoneSpecial() is used.
8072 */
8073IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8074{
8075 Assert(cbMem < UINT8_MAX);
8076 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8077 RTUINT64U NewRsp;
8078 NewRsp.u = *puNewRsp;
8079 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8080 *puNewRsp = NewRsp.u;
8081 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8082}
8083
8084
8085/**
8086 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8087 *
8088 * This will update the rSP.
8089 *
8090 * @returns Strict VBox status code.
8091 * @param pIemCpu The IEM per CPU data.
8092 * @param pvMem The pointer returned by
8093 * iemMemStackPopBeginSpecial().
8094 * @param uNewRsp The new RSP value returned by
8095 * iemMemStackPopBeginSpecial().
8096 */
8097IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8098{
8099 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8100 if (rcStrict == VINF_SUCCESS)
8101 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8102 return rcStrict;
8103}
8104
8105
8106/**
8107 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8108 * iemMemStackPopContinueSpecial).
8109 *
8110 * The caller will manually commit the rSP.
8111 *
8112 * @returns Strict VBox status code.
8113 * @param pIemCpu The IEM per CPU data.
8114 * @param pvMem The pointer returned by
8115 * iemMemStackPopBeginSpecial() or
8116 * iemMemStackPopContinueSpecial().
8117 */
8118IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8119{
8120 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8121}
8122
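/*
 * Usage sketch (hypothetical caller code): an IRET-style sequence maps the
 * frame, reads it, releases the mapping with iemMemStackPopDoneSpecial() and
 * commits RSP itself once all the checks have passed.
 *
 *      uint32_t const *pu32Frame;
 *      uint64_t        uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, (void const **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t uNewEip   = pu32Frame[0];
 *      uint16_t uNewCs    = (uint16_t)pu32Frame[1];
 *      uint32_t uNewFlags = pu32Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu32Frame);
 *      // ... validate uNewCs and friends, then commit: pCtx->rsp = uNewRsp;
 */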
8123
8124/**
8125 * Fetches a system table byte.
8126 *
8127 * @returns Strict VBox status code.
8128 * @param pIemCpu The IEM per CPU data.
8129 * @param pbDst Where to return the byte.
8130 * @param iSegReg The index of the segment register to use for
8131 * this access. The base and limits are checked.
8132 * @param GCPtrMem The address of the guest memory.
8133 */
8134IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8135{
8136 /* The lazy approach for now... */
8137 uint8_t const *pbSrc;
8138 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8139 if (rc == VINF_SUCCESS)
8140 {
8141 *pbDst = *pbSrc;
8142 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8143 }
8144 return rc;
8145}
8146
8147
8148/**
8149 * Fetches a system table word.
8150 *
8151 * @returns Strict VBox status code.
8152 * @param pIemCpu The IEM per CPU data.
8153 * @param pu16Dst Where to return the word.
8154 * @param iSegReg The index of the segment register to use for
8155 * this access. The base and limits are checked.
8156 * @param GCPtrMem The address of the guest memory.
8157 */
8158IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8159{
8160 /* The lazy approach for now... */
8161 uint16_t const *pu16Src;
8162 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8163 if (rc == VINF_SUCCESS)
8164 {
8165 *pu16Dst = *pu16Src;
8166 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8167 }
8168 return rc;
8169}
8170
8171
8172/**
8173 * Fetches a system table dword.
8174 *
8175 * @returns Strict VBox status code.
8176 * @param pIemCpu The IEM per CPU data.
8177 * @param pu32Dst Where to return the dword.
8178 * @param iSegReg The index of the segment register to use for
8179 * this access. The base and limits are checked.
8180 * @param GCPtrMem The address of the guest memory.
8181 */
8182IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8183{
8184 /* The lazy approach for now... */
8185 uint32_t const *pu32Src;
8186 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8187 if (rc == VINF_SUCCESS)
8188 {
8189 *pu32Dst = *pu32Src;
8190 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8191 }
8192 return rc;
8193}
8194
8195
8196/**
8197 * Fetches a system table qword.
8198 *
8199 * @returns Strict VBox status code.
8200 * @param pIemCpu The IEM per CPU data.
8201 * @param pu64Dst Where to return the qword.
8202 * @param iSegReg The index of the segment register to use for
8203 * this access. The base and limits are checked.
8204 * @param GCPtrMem The address of the guest memory.
8205 */
8206IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8207{
8208 /* The lazy approach for now... */
8209 uint64_t const *pu64Src;
8210 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8211 if (rc == VINF_SUCCESS)
8212 {
8213 *pu64Dst = *pu64Src;
8214 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8215 }
8216 return rc;
8217}
8218
8219
8220/**
8221 * Fetches a descriptor table entry with caller specified error code.
8222 *
8223 * @returns Strict VBox status code.
8224 * @param pIemCpu The IEM per CPU.
8225 * @param pDesc Where to return the descriptor table entry.
8226 * @param uSel The selector which table entry to fetch.
8227 * @param uXcpt The exception to raise on table lookup error.
8228 * @param uErrorCode The error code associated with the exception.
8229 */
8230IEM_STATIC VBOXSTRICTRC
8231iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8232{
8233 AssertPtr(pDesc);
8234 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8235
8236 /** @todo did the 286 require all 8 bytes to be accessible? */
8237 /*
8238 * Get the selector table base and check bounds.
8239 */
8240 RTGCPTR GCPtrBase;
8241 if (uSel & X86_SEL_LDT)
8242 {
8243 if ( !pCtx->ldtr.Attr.n.u1Present
8244 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8245 {
8246 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8247 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8248 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8249 uErrorCode, 0);
8250 }
8251
8252 Assert(pCtx->ldtr.Attr.n.u1Present);
8253 GCPtrBase = pCtx->ldtr.u64Base;
8254 }
8255 else
8256 {
8257 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8258 {
8259 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8260 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8261 uErrorCode, 0);
8262 }
8263 GCPtrBase = pCtx->gdtr.pGdt;
8264 }
8265
8266 /*
8267 * Read the legacy descriptor and maybe the long mode extensions if
8268 * required.
8269 */
8270 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8271 if (rcStrict == VINF_SUCCESS)
8272 {
8273 if ( !IEM_IS_LONG_MODE(pIemCpu)
8274 || pDesc->Legacy.Gen.u1DescType)
8275 pDesc->Long.au64[1] = 0;
8276 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8277 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8278 else
8279 {
8280 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8281 /** @todo is this the right exception? */
8282 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8283 }
8284 }
8285 return rcStrict;
8286}
8287
8288
8289/**
8290 * Fetches a descriptor table entry.
8291 *
8292 * @returns Strict VBox status code.
8293 * @param pIemCpu The IEM per CPU.
8294 * @param pDesc Where to return the descriptor table entry.
8295 * @param uSel The selector which table entry to fetch.
8296 * @param uXcpt The exception to raise on table lookup error.
8297 */
8298IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8299{
8300 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8301}
8302
8303
8304/**
8305 * Fakes a long mode stack selector for SS = 0.
8306 *
8307 * @param pDescSs Where to return the fake stack descriptor.
8308 * @param uDpl The DPL we want.
8309 */
8310IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8311{
8312 pDescSs->Long.au64[0] = 0;
8313 pDescSs->Long.au64[1] = 0;
8314 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8315 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8316 pDescSs->Long.Gen.u2Dpl = uDpl;
8317 pDescSs->Long.Gen.u1Present = 1;
8318 pDescSs->Long.Gen.u1Long = 1;
8319}
8320
8321
8322/**
8323 * Marks the selector descriptor as accessed (only non-system descriptors).
8324 *
8325 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8326 * will therefore skip the limit checks.
8327 *
8328 * @returns Strict VBox status code.
8329 * @param pIemCpu The IEM per CPU.
8330 * @param uSel The selector.
8331 */
8332IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8333{
8334 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8335
8336 /*
8337 * Get the selector table base and calculate the entry address.
8338 */
8339 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8340 ? pCtx->ldtr.u64Base
8341 : pCtx->gdtr.pGdt;
8342 GCPtr += uSel & X86_SEL_MASK;
8343
8344 /*
8345 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8346 * ugly stuff to avoid that. This also makes sure the access is atomic and
8347 * more or less removes any question about 8-bit vs 32-bit accesses.
8348 */
8349 VBOXSTRICTRC rcStrict;
8350 uint32_t volatile *pu32;
8351 if ((GCPtr & 3) == 0)
8352 {
8353 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8354 GCPtr += 2 + 2;
8355 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8356 if (rcStrict != VINF_SUCCESS)
8357 return rcStrict;
8358 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8359 }
8360 else
8361 {
8362 /* The misaligned GDT/LDT case, map the whole thing. */
8363 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8364 if (rcStrict != VINF_SUCCESS)
8365 return rcStrict;
8366 switch ((uintptr_t)pu32 & 3)
8367 {
8368 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8369 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8370 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8371 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8372 }
8373 }
8374
8375 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8376}
8377
8378/** @} */
8379
8380
8381/*
8382 * Include the C/C++ implementation of instructions.
8383 */
8384#include "IEMAllCImpl.cpp.h"
8385
8386
8387
8388/** @name "Microcode" macros.
8389 *
8390 * The idea is that we should be able to use the same code to interpret
8391 * instructions as well as to recompile them. Thus this obfuscation.
8392 *
8393 * @{
8394 */
8395#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8396#define IEM_MC_END() }
8397#define IEM_MC_PAUSE() do {} while (0)
8398#define IEM_MC_CONTINUE() do {} while (0)
8399
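/*
 * Usage sketch (hypothetical decoder body): a typical instruction handler
 * brackets a sequence of IEM_MC_* statements with IEM_MC_BEGIN/IEM_MC_END,
 * e.g. a register-form 32-bit PUSH might be expressed roughly as:
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_FETCH_GREG_U32(u32Value, X86_GREG_xBX);
 *      IEM_MC_PUSH_U32(u32Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
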
8400/** Internal macro. */
8401#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8402 do \
8403 { \
8404 VBOXSTRICTRC rcStrict2 = a_Expr; \
8405 if (rcStrict2 != VINF_SUCCESS) \
8406 return rcStrict2; \
8407 } while (0)
8408
8409#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8410#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8411#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8412#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8413#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8414#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8415#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8416
8417#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8418#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8419 do { \
8420 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8421 return iemRaiseDeviceNotAvailable(pIemCpu); \
8422 } while (0)
8423#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8424 do { \
8425 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8426 return iemRaiseMathFault(pIemCpu); \
8427 } while (0)
8428#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8429 do { \
8430 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8431 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8432 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8433 return iemRaiseUndefinedOpcode(pIemCpu); \
8434 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8435 return iemRaiseDeviceNotAvailable(pIemCpu); \
8436 } while (0)
8437#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8438 do { \
8439 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8440 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8441 return iemRaiseUndefinedOpcode(pIemCpu); \
8442 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8443 return iemRaiseDeviceNotAvailable(pIemCpu); \
8444 } while (0)
8445#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8446 do { \
8447 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8448 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8449 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8450 return iemRaiseUndefinedOpcode(pIemCpu); \
8451 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8452 return iemRaiseDeviceNotAvailable(pIemCpu); \
8453 } while (0)
8454#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8455 do { \
8456 if (pIemCpu->uCpl != 0) \
8457 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8458 } while (0)
8459
8460
8461#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8462#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8463#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8464#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8465#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8466#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8467#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8468 uint32_t a_Name; \
8469 uint32_t *a_pName = &a_Name
8470#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8471 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8472
8473#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8474#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8475
8476#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8477#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8478#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8479#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8480#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8481#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8482#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8483#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8484#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8485#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8486#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8487#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8488#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8489#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8490#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8491#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8492#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8493#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8494#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8495#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8496#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8497#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8498#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8499#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8500#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8501#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8502#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8503#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8504#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8505/** @note Not for IOPL or IF testing or modification. */
8506#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8507#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8508#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8509#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8510
8511#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8512#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8513#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8514#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8515#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8516#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8517#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8518#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8519#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8520#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8521#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8522 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8523
8524#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8525#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8526/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8527 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8528#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8529#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8530/** @note Not for IOPL or IF testing or modification. */
8531#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8532
8533#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8534#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8535#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8536 do { \
8537 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8538 *pu32Reg += (a_u32Value); \
8539 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8540 } while (0)
8541#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8542
8543#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8544#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8545#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8546 do { \
8547 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8548 *pu32Reg -= (a_u32Value); \
8549 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8550 } while (0)
8551#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8552
8553#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8554#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8555#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8556#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8557#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8558#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8559#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8560
8561#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8562#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8563#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8564#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8565
8566#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8567#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8568#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8569
8570#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8571#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8572
8573#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8574#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8575#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8576
8577#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8578#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8579#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8580
8581#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8582
8583#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8584
8585#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8586#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8587#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8588 do { \
8589 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8590 *pu32Reg &= (a_u32Value); \
8591 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8592 } while (0)
8593#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8594
8595#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8596#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8597#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8598 do { \
8599 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8600 *pu32Reg |= (a_u32Value); \
8601 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8602 } while (0)
8603#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8604
8605
8606/** @note Not for IOPL or IF modification. */
8607#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8608/** @note Not for IOPL or IF modification. */
8609#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8610/** @note Not for IOPL or IF modification. */
8611#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8612
8613#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8614
8615
8616#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8617 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8618#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8619 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8620#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8621 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8622#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8623 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8624#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8625 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8626#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8627 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8628#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8629 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8630
8631#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8632 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8633#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8634 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8635#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8636 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8637#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8638 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8639#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8640 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8641 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8642 } while (0)
8643#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8644 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8645 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8646 } while (0)
8647#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8648 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8649#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8650 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8651#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8652 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8653
8654#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8655 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8656#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8657 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8658#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8659 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8660
8661#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8662 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8663#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8664 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8665#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8666 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8667
8668#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8669 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8670#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8671 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8672#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8673 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8674
8675#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8677
8678#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8680#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8682#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8684#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8685 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8686
8687#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8688 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8689#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8691#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8693
8694#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8696#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8698
8699
8700
8701#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8702 do { \
8703 uint8_t u8Tmp; \
8704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8705 (a_u16Dst) = u8Tmp; \
8706 } while (0)
8707#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8708 do { \
8709 uint8_t u8Tmp; \
8710 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8711 (a_u32Dst) = u8Tmp; \
8712 } while (0)
8713#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8714 do { \
8715 uint8_t u8Tmp; \
8716 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8717 (a_u64Dst) = u8Tmp; \
8718 } while (0)
8719#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8720 do { \
8721 uint16_t u16Tmp; \
8722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8723 (a_u32Dst) = u16Tmp; \
8724 } while (0)
8725#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8726 do { \
8727 uint16_t u16Tmp; \
8728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8729 (a_u64Dst) = u16Tmp; \
8730 } while (0)
8731#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8732 do { \
8733 uint32_t u32Tmp; \
8734 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8735 (a_u64Dst) = u32Tmp; \
8736 } while (0)
8737
8738#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8739 do { \
8740 uint8_t u8Tmp; \
8741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8742 (a_u16Dst) = (int8_t)u8Tmp; \
8743 } while (0)
8744#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8745 do { \
8746 uint8_t u8Tmp; \
8747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8748 (a_u32Dst) = (int8_t)u8Tmp; \
8749 } while (0)
8750#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8751 do { \
8752 uint8_t u8Tmp; \
8753 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8754 (a_u64Dst) = (int8_t)u8Tmp; \
8755 } while (0)
8756#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8757 do { \
8758 uint16_t u16Tmp; \
8759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8760 (a_u32Dst) = (int16_t)u16Tmp; \
8761 } while (0)
8762#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8763 do { \
8764 uint16_t u16Tmp; \
8765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8766 (a_u64Dst) = (int16_t)u16Tmp; \
8767 } while (0)
8768#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8769 do { \
8770 uint32_t u32Tmp; \
8771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8772 (a_u64Dst) = (int32_t)u32Tmp; \
8773 } while (0)
8774
8775#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8776 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8777#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8778 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8779#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8780 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8781#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8782 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8783
8784#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8785 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8786#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8787 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8788#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8789 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8790#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8791 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8792
8793#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8794#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8795#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8796#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8797#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8798#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8799#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8800 do { \
8801 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8802 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8803 } while (0)
8804
8805#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8806 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8807#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8808 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8809
8810
8811#define IEM_MC_PUSH_U16(a_u16Value) \
8812 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8813#define IEM_MC_PUSH_U32(a_u32Value) \
8814 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8815#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8816 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8817#define IEM_MC_PUSH_U64(a_u64Value) \
8818 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8819
8820#define IEM_MC_POP_U16(a_pu16Value) \
8821 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8822#define IEM_MC_POP_U32(a_pu32Value) \
8823 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8824#define IEM_MC_POP_U64(a_pu64Value) \
8825 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8826
8827/** Maps guest memory for direct or bounce buffered access.
8828 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8829 * @remarks May return.
8830 */
8831#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8832 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8833
8834/** Maps guest memory for direct or bounce buffered access.
8835 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8836 * @remarks May return.
8837 */
8838#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8839 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8840
8841/** Commits the memory and unmaps the guest memory.
8842 * @remarks May return.
8843 */
8844#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8845 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8846
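/*
 * Usage sketch (hypothetical names): IEM_MC_MEM_MAP / IEM_MC_MEM_COMMIT_AND_UNMAP
 * form the read-modify-write counterpart of the fetch/store macros, e.g. for a
 * 32-bit memory destination operand of an arithmetic instruction:
 *
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint32_t *,  pu32Dst,            0);
 *      IEM_MC_ARG(uint32_t,    u32Src,             1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,    2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      ... fetch u32Src from the source register ...
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
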
8847/** Commits the memory and unmaps the guest memory unless the FPU status word
8848 * (@a a_u16FSW) and the FPU control word indicate a pending exception
8849 * that would cause FLD not to store.
8850 *
8851 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8852 * store, while \#P will not.
8853 *
8854 * @remarks May in theory return - for now.
8855 */
8856#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8857 do { \
8858 if ( !(a_u16FSW & X86_FSW_ES) \
8859 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8860 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8861 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8862 } while (0)
8863
8864/** Calculate effective address from R/M. */
8865#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8866 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8867
8868#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8869#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8870#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8871#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8872#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8873#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8874#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8875
8876/**
8877 * Defers the rest of the instruction emulation to a C implementation routine
8878 * and returns, only taking the standard parameters.
8879 *
8880 * @param a_pfnCImpl The pointer to the C routine.
8881 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8882 */
8883#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8884
8885/**
8886 * Defers the rest of instruction emulation to a C implementation routine and
8887 * returns, taking one argument in addition to the standard ones.
8888 *
8889 * @param a_pfnCImpl The pointer to the C routine.
8890 * @param a0 The argument.
8891 */
8892#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8893
8894/**
8895 * Defers the rest of the instruction emulation to a C implementation routine
8896 * and returns, taking two arguments in addition to the standard ones.
8897 *
8898 * @param a_pfnCImpl The pointer to the C routine.
8899 * @param a0 The first extra argument.
8900 * @param a1 The second extra argument.
8901 */
8902#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8903
8904/**
8905 * Defers the rest of the instruction emulation to a C implementation routine
8906 * and returns, taking three arguments in addition to the standard ones.
8907 *
8908 * @param a_pfnCImpl The pointer to the C routine.
8909 * @param a0 The first extra argument.
8910 * @param a1 The second extra argument.
8911 * @param a2 The third extra argument.
8912 */
8913#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8914
8915/**
8916 * Defers the rest of the instruction emulation to a C implementation routine
8917 * and returns, taking four arguments in addition to the standard ones.
8918 *
8919 * @param a_pfnCImpl The pointer to the C routine.
8920 * @param a0 The first extra argument.
8921 * @param a1 The second extra argument.
8922 * @param a2 The third extra argument.
8923 * @param a3 The fourth extra argument.
8924 */
8925#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8926
8927/**
8928 * Defers the rest of the instruction emulation to a C implementation routine
8929 * and returns, taking five arguments in addition to the standard ones.
8930 *
8931 * @param a_pfnCImpl The pointer to the C routine.
8932 * @param a0 The first extra argument.
8933 * @param a1 The second extra argument.
8934 * @param a2 The third extra argument.
8935 * @param a3 The fourth extra argument.
8936 * @param a4 The fifth extra argument.
8937 */
8938#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8939
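/*
 * Usage sketch (iemCImpl_SomeWorker and its arguments are hypothetical): the
 * CIMPL call macros hand the remainder of the instruction to a C worker from
 * within an IEM_MC_BEGIN/IEM_MC_END block and return its status, e.g.:
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG_CONST(uint16_t, uArg0, u16Imm, 0);
 *      IEM_MC_ARG_CONST(uint8_t,  uArg1, u8Imm,  1);
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, uArg0, uArg1);
 *      IEM_MC_END();
 */
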
8940/**
8941 * Defers the entire instruction emulation to a C implementation routine and
8942 * returns, only taking the standard parameters.
8943 *
8944 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8945 *
8946 * @param a_pfnCImpl The pointer to the C routine.
8947 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8948 */
8949#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8950
8951/**
8952 * Defers the entire instruction emulation to a C implementation routine and
8953 * returns, taking one argument in addition to the standard ones.
8954 *
8955 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8956 *
8957 * @param a_pfnCImpl The pointer to the C routine.
8958 * @param a0 The argument.
8959 */
8960#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8961
8962/**
8963 * Defers the entire instruction emulation to a C implementation routine and
8964 * returns, taking two arguments in addition to the standard ones.
8965 *
8966 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8967 *
8968 * @param a_pfnCImpl The pointer to the C routine.
8969 * @param a0 The first extra argument.
8970 * @param a1 The second extra argument.
8971 */
8972#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8973
8974/**
8975 * Defers the entire instruction emulation to a C implementation routine and
8976 * returns, taking three arguments in addition to the standard ones.
8977 *
8978 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8979 *
8980 * @param a_pfnCImpl The pointer to the C routine.
8981 * @param a0 The first extra argument.
8982 * @param a1 The second extra argument.
8983 * @param a2 The third extra argument.
8984 */
8985#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8986
8987/**
8988 * Calls a FPU assembly implementation taking one visible argument.
8989 *
8990 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8991 * @param a0 The first extra argument.
8992 */
8993#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
8994 do { \
8995 iemFpuPrepareUsage(pIemCpu); \
8996 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
8997 } while (0)
8998
8999/**
9000 * Calls a FPU assembly implementation taking two visible arguments.
9001 *
9002 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9003 * @param a0 The first extra argument.
9004 * @param a1 The second extra argument.
9005 */
9006#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9007 do { \
9008 iemFpuPrepareUsage(pIemCpu); \
9009 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9010 } while (0)
9011
9012/**
9013 * Calls a FPU assembly implementation taking three visible arguments.
9014 *
9015 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9016 * @param a0 The first extra argument.
9017 * @param a1 The second extra argument.
9018 * @param a2 The third extra argument.
9019 */
9020#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9021 do { \
9022 iemFpuPrepareUsage(pIemCpu); \
9023 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9024 } while (0)
9025
9026#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9027 do { \
9028 (a_FpuData).FSW = (a_FSW); \
9029 (a_FpuData).r80Result = *(a_pr80Value); \
9030 } while (0)
9031
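/*
 * Usage sketch (hypothetical; pfnAImpl stands for one of the assembly FPU
 * workers): a register-form FPU arithmetic handler computes into a local
 * IEMFPURESULT and then commits it with the store/push macros below.
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *      ...
 *      IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *      IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_USED_FPU();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
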
9032/** Pushes FPU result onto the stack. */
9033#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9034 iemFpuPushResult(pIemCpu, &a_FpuData)
9035/** Pushes FPU result onto the stack and sets the FPUDP. */
9036#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9037 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9038
9039/** Replaces ST0 with value one and pushes value two onto the FPU stack. */
9040#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9041 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9042
9043/** Stores FPU result in a stack register. */
9044#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9045 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9046/** Stores FPU result in a stack register and pops the stack. */
9047#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9048 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9049/** Stores FPU result in a stack register and sets the FPUDP. */
9050#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9051 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9052/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9053 * stack. */
9054#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9055 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9056
9057/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9058#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9059 iemFpuUpdateOpcodeAndIp(pIemCpu)
9060/** Free a stack register (for FFREE and FFREEP). */
9061#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9062 iemFpuStackFree(pIemCpu, a_iStReg)
9063/** Increment the FPU stack pointer. */
9064#define IEM_MC_FPU_STACK_INC_TOP() \
9065 iemFpuStackIncTop(pIemCpu)
9066/** Decrement the FPU stack pointer. */
9067#define IEM_MC_FPU_STACK_DEC_TOP() \
9068 iemFpuStackDecTop(pIemCpu)
9069
9070/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9071#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9072 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9073/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9074#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9075 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9076/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9077#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9078 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9079/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9080#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9081 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9082/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9083 * stack. */
9084#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9085 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9086/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9087#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9088 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9089
9090/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9091#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9092 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9093/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9094 * stack. */
9095#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9096 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9097/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9098 * FPUDS. */
9099#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9100 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9101/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9102 * FPUDS. Pops stack. */
9103#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9104 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9105/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9106 * stack twice. */
9107#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9108 iemFpuStackUnderflowThenPopPop(pIemCpu)
9109/** Raises a FPU stack underflow exception for an instruction pushing a result
9110 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9111#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9112 iemFpuStackPushUnderflow(pIemCpu)
9113/** Raises a FPU stack underflow exception for an instruction pushing a result
9114 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9115#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9116 iemFpuStackPushUnderflowTwo(pIemCpu)
9117
9118/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9119 * FPUIP, FPUCS and FOP. */
9120#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9121 iemFpuStackPushOverflow(pIemCpu)
9122/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9123 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9124#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9125 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9126/** Indicates that we (might) have modified the FPU state. */
9127#define IEM_MC_USED_FPU() \
9128 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9129
9130/**
9131 * Calls a MMX assembly implementation taking two visible arguments.
9132 *
9133 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9134 * @param a0 The first extra argument.
9135 * @param a1 The second extra argument.
9136 */
9137#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9138 do { \
9139 iemFpuPrepareUsage(pIemCpu); \
9140 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9141 } while (0)
9142
9143/**
9144 * Calls a MMX assembly implementation taking three visible arguments.
9145 *
9146 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9147 * @param a0 The first extra argument.
9148 * @param a1 The second extra argument.
9149 * @param a2 The third extra argument.
9150 */
9151#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9152 do { \
9153 iemFpuPrepareUsage(pIemCpu); \
9154 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9155 } while (0)
9156
9157
9158/**
9159 * Calls a SSE assembly implementation taking two visible arguments.
9160 *
9161 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9162 * @param a0 The first extra argument.
9163 * @param a1 The second extra argument.
9164 */
9165#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9166 do { \
9167 iemFpuPrepareUsageSse(pIemCpu); \
9168 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9169 } while (0)
9170
9171/**
9172 * Calls a SSE assembly implementation taking three visible arguments.
9173 *
9174 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9175 * @param a0 The first extra argument.
9176 * @param a1 The second extra argument.
9177 * @param a2 The third extra argument.
9178 */
9179#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9180 do { \
9181 iemFpuPrepareUsageSse(pIemCpu); \
9182 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9183 } while (0)
9184
9185
9186/** @note Not for IOPL or IF testing. */
9187#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9188/** @note Not for IOPL or IF testing. */
9189#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9190/** @note Not for IOPL or IF testing. */
9191#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9192/** @note Not for IOPL or IF testing. */
9193#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9194/** @note Not for IOPL or IF testing. */
9195#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9196 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9197 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9198/** @note Not for IOPL or IF testing. */
9199#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9200 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9201 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9202/** @note Not for IOPL or IF testing. */
9203#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9204 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9205 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9206 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9207/** @note Not for IOPL or IF testing. */
9208#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9209 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9210 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9211 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9212#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9213#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9214#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9215/** @note Not for IOPL or IF testing. */
9216#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9217 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9218 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9219/** @note Not for IOPL or IF testing. */
9220#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9221 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9222 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9223/** @note Not for IOPL or IF testing. */
9224#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9225 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9226 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9227/** @note Not for IOPL or IF testing. */
9228#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9229 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9230 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9231/** @note Not for IOPL or IF testing. */
9232#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9233 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9234 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9235/** @note Not for IOPL or IF testing. */
9236#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9237 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9238 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9239#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9240#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9241#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9242 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9243#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9244 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9245#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9246 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9247#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9248 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9249#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9250 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9251#define IEM_MC_IF_FCW_IM() \
9252 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9253
9254#define IEM_MC_ELSE() } else {
9255#define IEM_MC_ENDIF() } do {} while (0)
9256
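/* Illustrative sketch only, not taken from the original sources: the
 * conditional macros above open an if/else scope which IEM_MC_ELSE() and
 * IEM_MC_ENDIF() continue and close, so a jz rel8 style body reads roughly:
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 *     IEM_MC_END();
 */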
9257/** @} */
9258
9259
9260/** @name Opcode Debug Helpers.
9261 * @{
9262 */
9263#ifdef DEBUG
9264# define IEMOP_MNEMONIC(a_szMnemonic) \
9265 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9266 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9267# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9268 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9269 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9270#else
9271# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9272# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9273#endif
9274
9275/** @} */
9276
9277
9278/** @name Opcode Helpers.
9279 * @{
9280 */
9281
9282/** The instruction raises an \#UD in real and V8086 mode. */
9283#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9284 do \
9285 { \
9286 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9287 return IEMOP_RAISE_INVALID_OPCODE(); \
9288 } while (0)
9289
9290/** The instruction allows no lock prefixing (in this encoding); raises \#UD if
9291 * lock prefixed.
9292 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9293#define IEMOP_HLP_NO_LOCK_PREFIX() \
9294 do \
9295 { \
9296 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9297 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9298 } while (0)
9299
9300/** The instruction is not available in 64-bit mode; raises \#UD if we're in
9301 * 64-bit mode. */
9302#define IEMOP_HLP_NO_64BIT() \
9303 do \
9304 { \
9305 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9306 return IEMOP_RAISE_INVALID_OPCODE(); \
9307 } while (0)
9308
9309/** The instruction is only available in 64-bit mode; raises \#UD if we're not in
9310 * 64-bit mode. */
9311#define IEMOP_HLP_ONLY_64BIT() \
9312 do \
9313 { \
9314 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9315 return IEMOP_RAISE_INVALID_OPCODE(); \
9316 } while (0)
9317
9318/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9319#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9320 do \
9321 { \
9322 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9323 iemRecalEffOpSize64Default(pIemCpu); \
9324 } while (0)
9325
9326/** The instruction has 64-bit operand size if 64-bit mode. */
9327#define IEMOP_HLP_64BIT_OP_SIZE() \
9328 do \
9329 { \
9330 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9331 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9332 } while (0)
9333
9334/** Only a REX prefix immediately preceding the first opcode byte takes
9335 * effect. This macro helps ensure this, as well as log bad guest code.
9336#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9337 do \
9338 { \
9339 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9340 { \
9341 Log5((a_szPrf ": Overriding REX prefix at %RGv! fPrefixes=%#x\n", \
9342 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9343 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9344 pIemCpu->uRexB = 0; \
9345 pIemCpu->uRexIndex = 0; \
9346 pIemCpu->uRexReg = 0; \
9347 iemRecalEffOpSize(pIemCpu); \
9348 } \
9349 } while (0)
9350
9351/**
9352 * Done decoding.
9353 */
9354#define IEMOP_HLP_DONE_DECODING() \
9355 do \
9356 { \
9357 /*nothing for now, maybe later... */ \
9358 } while (0)
9359
9360/**
9361 * Done decoding, raise \#UD exception if lock prefix present.
9362 */
9363#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9364 do \
9365 { \
9366 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9367 { /* likely */ } \
9368 else \
9369 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9370 } while (0)
9371#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9372 do \
9373 { \
9374 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9375 { /* likely */ } \
9376 else \
9377 { \
9378 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9379 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9380 } \
9381 } while (0)
9382#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9383 do \
9384 { \
9385 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9386 { /* likely */ } \
9387 else \
9388 { \
9389 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9390 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9391 } \
9392 } while (0)
9393/**
9394 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9395 * are present.
9396 */
9397#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9398 do \
9399 { \
9400 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9401 { /* likely */ } \
9402 else \
9403 return IEMOP_RAISE_INVALID_OPCODE(); \
9404 } while (0)
9405
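/* Illustrative sketch only, not taken from the original sources: a typical
 * ModR/M based decoder function wires the helpers above up roughly like
 * this (the mnemonic is a placeholder):
 *
 *     IEMOP_MNEMONIC("xyz Ev,Gv");
 *     uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
 *     {
 *         // register operand: IEM_MC body referencing the general registers
 *     }
 *     else
 *     {
 *         // memory operand: calculate the effective address first
 *         IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     }
 */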
9406
9407/**
9408 * Calculates the effective address of a ModR/M memory operand.
9409 *
9410 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9411 *
9412 * @return Strict VBox status code.
9413 * @param pIemCpu The IEM per CPU data.
9414 * @param bRm The ModRM byte.
9415 * @param cbImm The size of any immediate following the
9416 * effective address opcode bytes. Important for
9417 * RIP relative addressing.
9418 * @param pGCPtrEff Where to return the effective address.
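 *
 * Worked examples (illustrative, not from the original sources): with
 * 16-bit addressing, bRm=0x46 (mod=1, r/m=6) decodes to [bp+disp8] with SS
 * as the default segment; with 32-bit addressing, bRm=0x44 followed by SIB
 * byte 0x88 (scale=*4, index=ecx, base=eax) decodes to [eax+ecx*4+disp8].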
9419 */
9420IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9421{
9422 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9423 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9424#define SET_SS_DEF() \
9425 do \
9426 { \
9427 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9428 pIemCpu->iEffSeg = X86_SREG_SS; \
9429 } while (0)
9430
9431 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9432 {
9433/** @todo Check the effective address size crap! */
9434 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9435 {
9436 uint16_t u16EffAddr;
9437
9438 /* Handle the disp16 form with no registers first. */
9439 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9440 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9441 else
9442 {
9443 /* Get the displacement. */
9444 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9445 {
9446 case 0: u16EffAddr = 0; break;
9447 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9448 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9449 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9450 }
9451
9452 /* Add the base and index registers to the disp. */
9453 switch (bRm & X86_MODRM_RM_MASK)
9454 {
9455 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9456 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9457 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9458 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9459 case 4: u16EffAddr += pCtx->si; break;
9460 case 5: u16EffAddr += pCtx->di; break;
9461 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9462 case 7: u16EffAddr += pCtx->bx; break;
9463 }
9464 }
9465
9466 *pGCPtrEff = u16EffAddr;
9467 }
9468 else
9469 {
9470 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9471 uint32_t u32EffAddr;
9472
9473 /* Handle the disp32 form with no registers first. */
9474 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9475 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9476 else
9477 {
9478 /* Get the register (or SIB) value. */
9479 switch ((bRm & X86_MODRM_RM_MASK))
9480 {
9481 case 0: u32EffAddr = pCtx->eax; break;
9482 case 1: u32EffAddr = pCtx->ecx; break;
9483 case 2: u32EffAddr = pCtx->edx; break;
9484 case 3: u32EffAddr = pCtx->ebx; break;
9485 case 4: /* SIB */
9486 {
9487 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9488
9489 /* Get the index and scale it. */
9490 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9491 {
9492 case 0: u32EffAddr = pCtx->eax; break;
9493 case 1: u32EffAddr = pCtx->ecx; break;
9494 case 2: u32EffAddr = pCtx->edx; break;
9495 case 3: u32EffAddr = pCtx->ebx; break;
9496 case 4: u32EffAddr = 0; /*none */ break;
9497 case 5: u32EffAddr = pCtx->ebp; break;
9498 case 6: u32EffAddr = pCtx->esi; break;
9499 case 7: u32EffAddr = pCtx->edi; break;
9500 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9501 }
9502 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9503
9504 /* add base */
9505 switch (bSib & X86_SIB_BASE_MASK)
9506 {
9507 case 0: u32EffAddr += pCtx->eax; break;
9508 case 1: u32EffAddr += pCtx->ecx; break;
9509 case 2: u32EffAddr += pCtx->edx; break;
9510 case 3: u32EffAddr += pCtx->ebx; break;
9511 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9512 case 5:
9513 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9514 {
9515 u32EffAddr += pCtx->ebp;
9516 SET_SS_DEF();
9517 }
9518 else
9519 {
9520 uint32_t u32Disp;
9521 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9522 u32EffAddr += u32Disp;
9523 }
9524 break;
9525 case 6: u32EffAddr += pCtx->esi; break;
9526 case 7: u32EffAddr += pCtx->edi; break;
9527 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9528 }
9529 break;
9530 }
9531 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9532 case 6: u32EffAddr = pCtx->esi; break;
9533 case 7: u32EffAddr = pCtx->edi; break;
9534 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9535 }
9536
9537 /* Get and add the displacement. */
9538 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9539 {
9540 case 0:
9541 break;
9542 case 1:
9543 {
9544 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9545 u32EffAddr += i8Disp;
9546 break;
9547 }
9548 case 2:
9549 {
9550 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9551 u32EffAddr += u32Disp;
9552 break;
9553 }
9554 default:
9555 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9556 }
9557
9558 }
9559 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9560 *pGCPtrEff = u32EffAddr;
9561 else
9562 {
9563 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9564 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9565 }
9566 }
9567 }
9568 else
9569 {
9570 uint64_t u64EffAddr;
9571
9572 /* Handle the rip+disp32 form with no registers first. */
9573 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9574 {
9575 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9576 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9577 }
9578 else
9579 {
9580 /* Get the register (or SIB) value. */
9581 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9582 {
9583 case 0: u64EffAddr = pCtx->rax; break;
9584 case 1: u64EffAddr = pCtx->rcx; break;
9585 case 2: u64EffAddr = pCtx->rdx; break;
9586 case 3: u64EffAddr = pCtx->rbx; break;
9587 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9588 case 6: u64EffAddr = pCtx->rsi; break;
9589 case 7: u64EffAddr = pCtx->rdi; break;
9590 case 8: u64EffAddr = pCtx->r8; break;
9591 case 9: u64EffAddr = pCtx->r9; break;
9592 case 10: u64EffAddr = pCtx->r10; break;
9593 case 11: u64EffAddr = pCtx->r11; break;
9594 case 13: u64EffAddr = pCtx->r13; break;
9595 case 14: u64EffAddr = pCtx->r14; break;
9596 case 15: u64EffAddr = pCtx->r15; break;
9597 /* SIB */
9598 case 4:
9599 case 12:
9600 {
9601 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9602
9603 /* Get the index and scale it. */
9604 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9605 {
9606 case 0: u64EffAddr = pCtx->rax; break;
9607 case 1: u64EffAddr = pCtx->rcx; break;
9608 case 2: u64EffAddr = pCtx->rdx; break;
9609 case 3: u64EffAddr = pCtx->rbx; break;
9610 case 4: u64EffAddr = 0; /*none */ break;
9611 case 5: u64EffAddr = pCtx->rbp; break;
9612 case 6: u64EffAddr = pCtx->rsi; break;
9613 case 7: u64EffAddr = pCtx->rdi; break;
9614 case 8: u64EffAddr = pCtx->r8; break;
9615 case 9: u64EffAddr = pCtx->r9; break;
9616 case 10: u64EffAddr = pCtx->r10; break;
9617 case 11: u64EffAddr = pCtx->r11; break;
9618 case 12: u64EffAddr = pCtx->r12; break;
9619 case 13: u64EffAddr = pCtx->r13; break;
9620 case 14: u64EffAddr = pCtx->r14; break;
9621 case 15: u64EffAddr = pCtx->r15; break;
9622 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9623 }
9624 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9625
9626 /* add base */
9627 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9628 {
9629 case 0: u64EffAddr += pCtx->rax; break;
9630 case 1: u64EffAddr += pCtx->rcx; break;
9631 case 2: u64EffAddr += pCtx->rdx; break;
9632 case 3: u64EffAddr += pCtx->rbx; break;
9633 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9634 case 6: u64EffAddr += pCtx->rsi; break;
9635 case 7: u64EffAddr += pCtx->rdi; break;
9636 case 8: u64EffAddr += pCtx->r8; break;
9637 case 9: u64EffAddr += pCtx->r9; break;
9638 case 10: u64EffAddr += pCtx->r10; break;
9639 case 11: u64EffAddr += pCtx->r11; break;
9640 case 12: u64EffAddr += pCtx->r12; break;
9641 case 14: u64EffAddr += pCtx->r14; break;
9642 case 15: u64EffAddr += pCtx->r15; break;
9643 /* complicated encodings */
9644 case 5:
9645 case 13:
9646 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9647 {
9648 if (!pIemCpu->uRexB)
9649 {
9650 u64EffAddr += pCtx->rbp;
9651 SET_SS_DEF();
9652 }
9653 else
9654 u64EffAddr += pCtx->r13;
9655 }
9656 else
9657 {
9658 uint32_t u32Disp;
9659 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9660 u64EffAddr += (int32_t)u32Disp;
9661 }
9662 break;
9663 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9664 }
9665 break;
9666 }
9667 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9668 }
9669
9670 /* Get and add the displacement. */
9671 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9672 {
9673 case 0:
9674 break;
9675 case 1:
9676 {
9677 int8_t i8Disp;
9678 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9679 u64EffAddr += i8Disp;
9680 break;
9681 }
9682 case 2:
9683 {
9684 uint32_t u32Disp;
9685 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9686 u64EffAddr += (int32_t)u32Disp;
9687 break;
9688 }
9689 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9690 }
9691
9692 }
9693
9694 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9695 *pGCPtrEff = u64EffAddr;
9696 else
9697 {
9698 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9699 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9700 }
9701 }
9702
9703 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9704 return VINF_SUCCESS;
9705}
9706
9707/** @} */
9708
9709
9710
9711/*
9712 * Include the instructions
9713 */
9714#include "IEMAllInstructions.cpp.h"
9715
9716
9717
9718
9719#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9720
9721/**
9722 * Sets up execution verification mode.
9723 */
9724IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9725{
9726 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9727 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9728
9729 /*
9730 * Always note down the address of the current instruction.
9731 */
9732 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9733 pIemCpu->uOldRip = pOrgCtx->rip;
9734
9735 /*
9736 * Enable verification and/or logging.
9737 */
9738 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9739 if ( fNewNoRem
9740 && ( 0
9741#if 0 /* auto enable on first paged protected mode interrupt */
9742 || ( pOrgCtx->eflags.Bits.u1IF
9743 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9744 && TRPMHasTrap(pVCpu)
9745 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9746#endif
9747#if 0
9748 || ( pOrgCtx->cs.Sel == 0x10
9749 && ( pOrgCtx->rip == 0x90119e3e
9750 || pOrgCtx->rip == 0x901d9810) )
9751#endif
9752#if 0 /* Auto enable DSL - FPU stuff. */
9753 || ( pOrgCtx->cs.Sel == 0x10
9754 && (// pOrgCtx->rip == 0xc02ec07f
9755 //|| pOrgCtx->rip == 0xc02ec082
9756 //|| pOrgCtx->rip == 0xc02ec0c9
9757 0
9758 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9759#endif
9760#if 0 /* Auto enable DSL - fstp st0 stuff. */
9761 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9762#endif
9763#if 0
9764 || pOrgCtx->rip == 0x9022bb3a
9765#endif
9766#if 0
9767 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9768#endif
9769#if 0
9770 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9771 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9772#endif
9773#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9774 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9775 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9776 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9777#endif
9778#if 0 /* NT4SP1 - xadd early boot. */
9779 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9780#endif
9781#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9782 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9783#endif
9784#if 0 /* NT4SP1 - cmpxchg (AMD). */
9785 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9786#endif
9787#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9788 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9789#endif
9790#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9791 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9792
9793#endif
9794#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9795 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9796
9797#endif
9798#if 0 /* NT4SP1 - frstor [ecx] */
9799 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9800#endif
9801#if 0 /* xxxxxx - All long mode code. */
9802 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9803#endif
9804#if 0 /* rep movsq linux 3.7 64-bit boot. */
9805 || (pOrgCtx->rip == 0x0000000000100241)
9806#endif
9807#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9808 || (pOrgCtx->rip == 0x000000000215e240)
9809#endif
9810#if 0 /* DOS's size-overridden iret to v8086. */
9811 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9812#endif
9813 )
9814 )
9815 {
9816 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9817 RTLogFlags(NULL, "enabled");
9818 fNewNoRem = false;
9819 }
9820 if (fNewNoRem != pIemCpu->fNoRem)
9821 {
9822 pIemCpu->fNoRem = fNewNoRem;
9823 if (!fNewNoRem)
9824 {
9825 LogAlways(("Enabling verification mode!\n"));
9826 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9827 }
9828 else
9829 LogAlways(("Disabling verification mode!\n"));
9830 }
9831
9832 /*
9833 * Switch state.
9834 */
9835 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9836 {
9837 static CPUMCTX s_DebugCtx; /* Ugly! */
9838
9839 s_DebugCtx = *pOrgCtx;
9840 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9841 }
9842
9843 /*
9844 * See if there is an interrupt pending in TRPM and inject it if we can.
9845 */
9846 pIemCpu->uInjectCpl = UINT8_MAX;
9847 if ( pOrgCtx->eflags.Bits.u1IF
9848 && TRPMHasTrap(pVCpu)
9849 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9850 {
9851 uint8_t u8TrapNo;
9852 TRPMEVENT enmType;
9853 RTGCUINT uErrCode;
9854 RTGCPTR uCr2;
9855 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9856 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9857 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9858 TRPMResetTrap(pVCpu);
9859 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9860 }
9861
9862 /*
9863 * Reset the counters.
9864 */
9865 pIemCpu->cIOReads = 0;
9866 pIemCpu->cIOWrites = 0;
9867 pIemCpu->fIgnoreRaxRdx = false;
9868 pIemCpu->fOverlappingMovs = false;
9869 pIemCpu->fProblematicMemory = false;
9870 pIemCpu->fUndefinedEFlags = 0;
9871
9872 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9873 {
9874 /*
9875 * Free all verification records.
9876 */
9877 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9878 pIemCpu->pIemEvtRecHead = NULL;
9879 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9880 do
9881 {
9882 while (pEvtRec)
9883 {
9884 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9885 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9886 pIemCpu->pFreeEvtRec = pEvtRec;
9887 pEvtRec = pNext;
9888 }
9889 pEvtRec = pIemCpu->pOtherEvtRecHead;
9890 pIemCpu->pOtherEvtRecHead = NULL;
9891 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9892 } while (pEvtRec);
9893 }
9894}
9895
9896
9897/**
9898 * Allocate an event record.
9899 * @returns Pointer to a record.
9900 */
9901IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9902{
9903 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9904 return NULL;
9905
9906 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9907 if (pEvtRec)
9908 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9909 else
9910 {
9911 if (!pIemCpu->ppIemEvtRecNext)
9912 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9913
9914 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9915 if (!pEvtRec)
9916 return NULL;
9917 }
9918 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9919 pEvtRec->pNext = NULL;
9920 return pEvtRec;
9921}
9922
9923
9924/**
9925 * IOMMMIORead notification.
9926 */
9927VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9928{
9929 PVMCPU pVCpu = VMMGetCpu(pVM);
9930 if (!pVCpu)
9931 return;
9932 PIEMCPU pIemCpu = &pVCpu->iem.s;
9933 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9934 if (!pEvtRec)
9935 return;
9936 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9937 pEvtRec->u.RamRead.GCPhys = GCPhys;
9938 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9939 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9940 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9941}
9942
9943
9944/**
9945 * IOMMMIOWrite notification.
9946 */
9947VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9948{
9949 PVMCPU pVCpu = VMMGetCpu(pVM);
9950 if (!pVCpu)
9951 return;
9952 PIEMCPU pIemCpu = &pVCpu->iem.s;
9953 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9954 if (!pEvtRec)
9955 return;
9956 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9957 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9958 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9959 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9960 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9961 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9962 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9963 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9964 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9965}
9966
9967
9968/**
9969 * IOMIOPortRead notification.
9970 */
9971VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9972{
9973 PVMCPU pVCpu = VMMGetCpu(pVM);
9974 if (!pVCpu)
9975 return;
9976 PIEMCPU pIemCpu = &pVCpu->iem.s;
9977 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9978 if (!pEvtRec)
9979 return;
9980 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9981 pEvtRec->u.IOPortRead.Port = Port;
9982 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9983 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9984 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9985}
9986
9987/**
9988 * IOMIOPortWrite notification.
9989 */
9990VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9991{
9992 PVMCPU pVCpu = VMMGetCpu(pVM);
9993 if (!pVCpu)
9994 return;
9995 PIEMCPU pIemCpu = &pVCpu->iem.s;
9996 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9997 if (!pEvtRec)
9998 return;
9999 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10000 pEvtRec->u.IOPortWrite.Port = Port;
10001 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10002 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10003 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10004 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10005}
10006
10007
10008VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10009{
10010 AssertFailed();
10011}
10012
10013
10014VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10015{
10016 AssertFailed();
10017}
10018
10019
10020/**
10021 * Fakes and records an I/O port read.
10022 *
10023 * @returns VINF_SUCCESS.
10024 * @param pIemCpu The IEM per CPU data.
10025 * @param Port The I/O port.
10026 * @param pu32Value Where to store the fake value.
10027 * @param cbValue The size of the access.
10028 */
10029IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10030{
10031 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10032 if (pEvtRec)
10033 {
10034 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10035 pEvtRec->u.IOPortRead.Port = Port;
10036 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
10037 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10038 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10039 }
10040 pIemCpu->cIOReads++;
10041 *pu32Value = 0xcccccccc;
10042 return VINF_SUCCESS;
10043}
10044
10045
10046/**
10047 * Fakes and records an I/O port write.
10048 *
10049 * @returns VINF_SUCCESS.
10050 * @param pIemCpu The IEM per CPU data.
10051 * @param Port The I/O port.
10052 * @param u32Value The value being written.
10053 * @param cbValue The size of the access.
10054 */
10055IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10056{
10057 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10058 if (pEvtRec)
10059 {
10060 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10061 pEvtRec->u.IOPortWrite.Port = Port;
10062 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10063 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10064 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10065 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10066 }
10067 pIemCpu->cIOWrites++;
10068 return VINF_SUCCESS;
10069}
10070
10071
10072/**
10073 * Used to add extra details about a stub case.
10074 * @param pIemCpu The IEM per CPU state.
10075 */
10076IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10077{
10078 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10079 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10080 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10081 char szRegs[4096];
10082 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10083 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10084 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10085 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10086 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10087 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10088 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10089 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10090 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10091 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10092 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10093 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10094 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10095 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10096 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10097 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10098 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10099 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10100 " efer=%016VR{efer}\n"
10101 " pat=%016VR{pat}\n"
10102 " sf_mask=%016VR{sf_mask}\n"
10103 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10104 " lstar=%016VR{lstar}\n"
10105 " star=%016VR{star} cstar=%016VR{cstar}\n"
10106 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10107 );
10108
10109 char szInstr1[256];
10110 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10111 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10112 szInstr1, sizeof(szInstr1), NULL);
10113 char szInstr2[256];
10114 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10115 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10116 szInstr2, sizeof(szInstr2), NULL);
10117
10118 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10119}
10120
10121
10122/**
10123 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10124 * dump to the assertion info.
10125 *
10126 * @param pEvtRec The record to dump.
10127 */
10128IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10129{
10130 switch (pEvtRec->enmEvent)
10131 {
10132 case IEMVERIFYEVENT_IOPORT_READ:
10133 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10134 pEvtRec->u.IOPortRead.Port,
10135 pEvtRec->u.IOPortRead.cbValue);
10136 break;
10137 case IEMVERIFYEVENT_IOPORT_WRITE:
10138 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10139 pEvtRec->u.IOPortWrite.Port,
10140 pEvtRec->u.IOPortWrite.cbValue,
10141 pEvtRec->u.IOPortWrite.u32Value);
10142 break;
10143 case IEMVERIFYEVENT_RAM_READ:
10144 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10145 pEvtRec->u.RamRead.GCPhys,
10146 pEvtRec->u.RamRead.cb);
10147 break;
10148 case IEMVERIFYEVENT_RAM_WRITE:
10149 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10150 pEvtRec->u.RamWrite.GCPhys,
10151 pEvtRec->u.RamWrite.cb,
10152 (int)pEvtRec->u.RamWrite.cb,
10153 pEvtRec->u.RamWrite.ab);
10154 break;
10155 default:
10156 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10157 break;
10158 }
10159}
10160
10161
10162/**
10163 * Raises an assertion on the specified record, showing the given message with
10164 * a record dump attached.
10165 *
10166 * @param pIemCpu The IEM per CPU data.
10167 * @param pEvtRec1 The first record.
10168 * @param pEvtRec2 The second record.
10169 * @param pszMsg The message explaining why we're asserting.
10170 */
10171IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10172{
10173 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10174 iemVerifyAssertAddRecordDump(pEvtRec1);
10175 iemVerifyAssertAddRecordDump(pEvtRec2);
10176 iemVerifyAssertMsg2(pIemCpu);
10177 RTAssertPanic();
10178}
10179
10180
10181/**
10182 * Raises an assertion on the specified record, showing the given message with
10183 * a record dump attached.
10184 *
10185 * @param pIemCpu The IEM per CPU data.
10186 * @param pEvtRec The record to dump.
10187 * @param pszMsg The message explaining why we're asserting.
10188 */
10189IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10190{
10191 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10192 iemVerifyAssertAddRecordDump(pEvtRec);
10193 iemVerifyAssertMsg2(pIemCpu);
10194 RTAssertPanic();
10195}
10196
10197
10198/**
10199 * Verifies a write record.
10200 *
10201 * @param pIemCpu The IEM per CPU data.
10202 * @param pEvtRec The write record.
10203 * @param fRem Set if REM was doing the other execution. If clear
10204 * it was HM.
10205 */
10206IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10207{
10208 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10209 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10210 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10211 if ( RT_FAILURE(rc)
10212 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10213 {
10214 /* fend off ins */
10215 if ( !pIemCpu->cIOReads
10216 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10217 || ( pEvtRec->u.RamWrite.cb != 1
10218 && pEvtRec->u.RamWrite.cb != 2
10219 && pEvtRec->u.RamWrite.cb != 4) )
10220 {
10221 /* fend off ROMs and MMIO */
10222 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10223 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10224 {
10225 /* fend off fxsave */
10226 if (pEvtRec->u.RamWrite.cb != 512)
10227 {
10228 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10229 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10230 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10231 RTAssertMsg2Add("%s: %.*Rhxs\n"
10232 "iem: %.*Rhxs\n",
10233 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10234 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10235 iemVerifyAssertAddRecordDump(pEvtRec);
10236 iemVerifyAssertMsg2(pIemCpu);
10237 RTAssertPanic();
10238 }
10239 }
10240 }
10241 }
10242
10243}
10244
10245/**
10246 * Performs the post-execution verification checks.
10247 */
10248IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10249{
10250 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10251 return;
10252
10253 /*
10254 * Switch back the state.
10255 */
10256 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10257 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10258 Assert(pOrgCtx != pDebugCtx);
10259 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10260
10261 /*
10262 * Execute the instruction in REM.
10263 */
10264 bool fRem = false;
10265 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10266 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10267 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10268#ifdef IEM_VERIFICATION_MODE_FULL_HM
10269 if ( HMIsEnabled(pVM)
10270 && pIemCpu->cIOReads == 0
10271 && pIemCpu->cIOWrites == 0
10272 && !pIemCpu->fProblematicMemory)
10273 {
10274 uint64_t uStartRip = pOrgCtx->rip;
10275 unsigned iLoops = 0;
10276 do
10277 {
10278 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10279 iLoops++;
10280 } while ( rc == VINF_SUCCESS
10281 || ( rc == VINF_EM_DBG_STEPPED
10282 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10283 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10284 || ( pOrgCtx->rip != pDebugCtx->rip
10285 && pIemCpu->uInjectCpl != UINT8_MAX
10286 && iLoops < 8) );
10287 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10288 rc = VINF_SUCCESS;
10289 }
10290#endif
10291 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10292 || rc == VINF_IOM_R3_IOPORT_READ
10293 || rc == VINF_IOM_R3_IOPORT_WRITE
10294 || rc == VINF_IOM_R3_MMIO_READ
10295 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10296 || rc == VINF_IOM_R3_MMIO_WRITE
10297 || rc == VINF_CPUM_R3_MSR_READ
10298 || rc == VINF_CPUM_R3_MSR_WRITE
10299 || rc == VINF_EM_RESCHEDULE
10300 )
10301 {
10302 EMRemLock(pVM);
10303 rc = REMR3EmulateInstruction(pVM, pVCpu);
10304 AssertRC(rc);
10305 EMRemUnlock(pVM);
10306 fRem = true;
10307 }
10308
10309 /*
10310 * Compare the register states.
10311 */
10312 unsigned cDiffs = 0;
10313 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10314 {
10315 //Log(("REM and IEM ends up with different registers!\n"));
10316 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10317
10318# define CHECK_FIELD(a_Field) \
10319 do \
10320 { \
10321 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10322 { \
10323 switch (sizeof(pOrgCtx->a_Field)) \
10324 { \
10325 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10326 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10327 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10328 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10329 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10330 } \
10331 cDiffs++; \
10332 } \
10333 } while (0)
10334# define CHECK_XSTATE_FIELD(a_Field) \
10335 do \
10336 { \
10337 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10338 { \
10339 switch (sizeof(pOrgCtx->a_Field)) \
10340 { \
10341 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10342 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10343 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10344 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10345 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10346 } \
10347 cDiffs++; \
10348 } \
10349 } while (0)
10350
10351# define CHECK_BIT_FIELD(a_Field) \
10352 do \
10353 { \
10354 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10355 { \
10356 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10357 cDiffs++; \
10358 } \
10359 } while (0)
10360
10361# define CHECK_SEL(a_Sel) \
10362 do \
10363 { \
10364 CHECK_FIELD(a_Sel.Sel); \
10365 CHECK_FIELD(a_Sel.Attr.u); \
10366 CHECK_FIELD(a_Sel.u64Base); \
10367 CHECK_FIELD(a_Sel.u32Limit); \
10368 CHECK_FIELD(a_Sel.fFlags); \
10369 } while (0)
10370
10371 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10372 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10373
10374#if 1 /* The recompiler doesn't update these the intel way. */
10375 if (fRem)
10376 {
10377 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10378 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10379 pOrgXState->x87.CS = pDebugXState->x87.CS;
10380 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10381 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10382 pOrgXState->x87.DS = pDebugXState->x87.DS;
10383 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10384 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10385 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10386 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10387 }
10388#endif
10389 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10390 {
10391 RTAssertMsg2Weak(" the FPU state differs\n");
10392 cDiffs++;
10393 CHECK_XSTATE_FIELD(x87.FCW);
10394 CHECK_XSTATE_FIELD(x87.FSW);
10395 CHECK_XSTATE_FIELD(x87.FTW);
10396 CHECK_XSTATE_FIELD(x87.FOP);
10397 CHECK_XSTATE_FIELD(x87.FPUIP);
10398 CHECK_XSTATE_FIELD(x87.CS);
10399 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10400 CHECK_XSTATE_FIELD(x87.FPUDP);
10401 CHECK_XSTATE_FIELD(x87.DS);
10402 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10403 CHECK_XSTATE_FIELD(x87.MXCSR);
10404 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10405 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10406 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10407 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10408 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10409 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10410 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10411 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10412 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10413 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10414 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10415 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10416 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10417 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10418 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10419 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10420 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10421 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10422 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10423 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10424 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10425 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10426 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10427 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10428 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10429 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10430 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10431 }
10432 CHECK_FIELD(rip);
10433 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10434 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10435 {
10436 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10437 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10438 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10439 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10440 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10441 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10442 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10443 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10444 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10445 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10446 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10447 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10448 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10449 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10450 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10451 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10452 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
10453 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10454 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10455 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10456 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10457 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10458 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10459 }
10460
10461 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10462 CHECK_FIELD(rax);
10463 CHECK_FIELD(rcx);
10464 if (!pIemCpu->fIgnoreRaxRdx)
10465 CHECK_FIELD(rdx);
10466 CHECK_FIELD(rbx);
10467 CHECK_FIELD(rsp);
10468 CHECK_FIELD(rbp);
10469 CHECK_FIELD(rsi);
10470 CHECK_FIELD(rdi);
10471 CHECK_FIELD(r8);
10472 CHECK_FIELD(r9);
10473 CHECK_FIELD(r10);
10474 CHECK_FIELD(r11);
10475 CHECK_FIELD(r12);
10476 CHECK_FIELD(r13);
10477 CHECK_SEL(cs);
10478 CHECK_SEL(ss);
10479 CHECK_SEL(ds);
10480 CHECK_SEL(es);
10481 CHECK_SEL(fs);
10482 CHECK_SEL(gs);
10483 CHECK_FIELD(cr0);
10484
10485 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10486 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10487 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
10488 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10489 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10490 {
10491 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10492 { /* ignore */ }
10493 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10494 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10495 && fRem)
10496 { /* ignore */ }
10497 else
10498 CHECK_FIELD(cr2);
10499 }
10500 CHECK_FIELD(cr3);
10501 CHECK_FIELD(cr4);
10502 CHECK_FIELD(dr[0]);
10503 CHECK_FIELD(dr[1]);
10504 CHECK_FIELD(dr[2]);
10505 CHECK_FIELD(dr[3]);
10506 CHECK_FIELD(dr[6]);
10507 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10508 CHECK_FIELD(dr[7]);
10509 CHECK_FIELD(gdtr.cbGdt);
10510 CHECK_FIELD(gdtr.pGdt);
10511 CHECK_FIELD(idtr.cbIdt);
10512 CHECK_FIELD(idtr.pIdt);
10513 CHECK_SEL(ldtr);
10514 CHECK_SEL(tr);
10515 CHECK_FIELD(SysEnter.cs);
10516 CHECK_FIELD(SysEnter.eip);
10517 CHECK_FIELD(SysEnter.esp);
10518 CHECK_FIELD(msrEFER);
10519 CHECK_FIELD(msrSTAR);
10520 CHECK_FIELD(msrPAT);
10521 CHECK_FIELD(msrLSTAR);
10522 CHECK_FIELD(msrCSTAR);
10523 CHECK_FIELD(msrSFMASK);
10524 CHECK_FIELD(msrKERNELGSBASE);
10525
10526 if (cDiffs != 0)
10527 {
10528 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10529 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10530 iemVerifyAssertMsg2(pIemCpu);
10531 RTAssertPanic();
10532 }
10533# undef CHECK_FIELD
10534# undef CHECK_BIT_FIELD
10535 }
10536
10537 /*
10538 * If the register state compared fine, check the verification event
10539 * records.
10540 */
10541 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10542 {
10543 /*
10544 * Compare verification event records.
10545 * - I/O port accesses should be a 1:1 match.
10546 */
10547 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10548 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10549 while (pIemRec && pOtherRec)
10550 {
10551 /* Since we might miss RAM writes and reads, ignore reads and check
10552 that any extra write records match what is actually in guest memory. */
10553 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10554 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10555 && pIemRec->pNext)
10556 {
10557 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10558 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10559 pIemRec = pIemRec->pNext;
10560 }
10561
10562 /* Do the compare. */
10563 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10564 {
10565 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10566 break;
10567 }
10568 bool fEquals;
10569 switch (pIemRec->enmEvent)
10570 {
10571 case IEMVERIFYEVENT_IOPORT_READ:
10572 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10573 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10574 break;
10575 case IEMVERIFYEVENT_IOPORT_WRITE:
10576 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10577 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10578 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10579 break;
10580 case IEMVERIFYEVENT_RAM_READ:
10581 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10582 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10583 break;
10584 case IEMVERIFYEVENT_RAM_WRITE:
10585 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10586 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10587 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10588 break;
10589 default:
10590 fEquals = false;
10591 break;
10592 }
10593 if (!fEquals)
10594 {
10595 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10596 break;
10597 }
10598
10599 /* advance */
10600 pIemRec = pIemRec->pNext;
10601 pOtherRec = pOtherRec->pNext;
10602 }
10603
10604 /* Ignore extra writes and reads. */
10605 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10606 {
10607 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10608 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10609 pIemRec = pIemRec->pNext;
10610 }
10611 if (pIemRec != NULL)
10612 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10613 else if (pOtherRec != NULL)
10614 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10615 }
10616 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10617}
10618
10619#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10620
10621/* stubs */
10622IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10623{
10624 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10625 return VERR_INTERNAL_ERROR;
10626}
10627
10628IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10629{
10630 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10631 return VERR_INTERNAL_ERROR;
10632}
10633
10634#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10635
10636
10637#ifdef LOG_ENABLED
10638/**
10639 * Logs the current instruction.
10640 * @param pVCpu The cross context virtual CPU structure of the caller.
10641 * @param pCtx The current CPU context.
10642 * @param fSameCtx Set if we have the same context information as the VMM,
10643 * clear if we may have already executed an instruction in
10644 * our debug context. When clear, we assume IEMCPU holds
10645 * valid CPU mode info.
10646 */
10647IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10648{
10649# ifdef IN_RING3
10650 if (LogIs2Enabled())
10651 {
10652 char szInstr[256];
10653 uint32_t cbInstr = 0;
10654 if (fSameCtx)
10655 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10656 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10657 szInstr, sizeof(szInstr), &cbInstr);
10658 else
10659 {
10660 uint32_t fFlags = 0;
10661 switch (pVCpu->iem.s.enmCpuMode)
10662 {
10663 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10664 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10665 case IEMMODE_16BIT:
10666 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10667 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10668 else
10669 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10670 break;
10671 }
10672 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10673 szInstr, sizeof(szInstr), &cbInstr);
10674 }
10675
10676 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10677 Log2(("****\n"
10678 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10679 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10680 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10681 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10682 " %s\n"
10683 ,
10684 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10685 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10686 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10687 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10688 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10689 szInstr));
10690
10691 if (LogIs3Enabled())
10692 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10693 }
10694 else
10695# endif
10696 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10697 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10698}
10699#endif
10700
10701
10702/**
10703 * Makes status code adjustments (pass up from I/O and access handlers)
10704 * as well as maintaining statistics.
10705 *
10706 * @returns Strict VBox status code to pass up.
10707 * @param pIemCpu The IEM per CPU data.
10708 * @param rcStrict The status from executing an instruction.
10709 */
10710DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10711{
10712 if (rcStrict != VINF_SUCCESS)
10713 {
10714 if (RT_SUCCESS(rcStrict))
10715 {
10716 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10717 || rcStrict == VINF_IOM_R3_IOPORT_READ
10718 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10719 || rcStrict == VINF_IOM_R3_MMIO_READ
10720 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10721 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10722 || rcStrict == VINF_CPUM_R3_MSR_READ
10723 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10724 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10725 || rcStrict == VINF_EM_RAW_TO_R3
10726 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10727 /* raw-mode / virt handlers only: */
10728 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10729 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10730 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10731 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10732 || rcStrict == VINF_SELM_SYNC_GDT
10733 || rcStrict == VINF_CSAM_PENDING_ACTION
10734 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10735 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10736/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10737 int32_t const rcPassUp = pIemCpu->rcPassUp;
10738 if (rcPassUp == VINF_SUCCESS)
10739 pIemCpu->cRetInfStatuses++;
10740 else if ( rcPassUp < VINF_EM_FIRST
10741 || rcPassUp > VINF_EM_LAST
10742 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10743 {
10744 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10745 pIemCpu->cRetPassUpStatus++;
10746 rcStrict = rcPassUp;
10747 }
10748 else
10749 {
10750 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10751 pIemCpu->cRetInfStatuses++;
10752 }
10753 }
10754 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10755 pIemCpu->cRetAspectNotImplemented++;
10756 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10757 pIemCpu->cRetInstrNotImplemented++;
10758#ifdef IEM_VERIFICATION_MODE_FULL
10759 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10760 rcStrict = VINF_SUCCESS;
10761#endif
10762 else
10763 pIemCpu->cRetErrStatuses++;
10764 }
10765 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10766 {
10767 pIemCpu->cRetPassUpStatus++;
10768 rcStrict = pIemCpu->rcPassUp;
10769 }
10770
10771 return rcStrict;
10772}
10773
10774
10775/**
10776 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10777 * IEMExecOneWithPrefetchedByPC.
10778 *
10779 * @return Strict VBox status code.
10780 * @param pVCpu The current virtual CPU.
10781 * @param pIemCpu The IEM per CPU data.
10782 * @param fExecuteInhibit If set, execute the instruction following CLI,
10783 * POP SS and MOV SS,GR.
10784 */
10785DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10786{
10787 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10788 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10789 if (rcStrict == VINF_SUCCESS)
10790 pIemCpu->cInstructions++;
10791 if (pIemCpu->cActiveMappings > 0)
10792 iemMemRollback(pIemCpu);
10793//#ifdef DEBUG
10794// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10795//#endif
10796
10797 /* Execute the next instruction as well if a cli, pop ss or
10798 mov ss, Gr has just completed successfully. */
10799 if ( fExecuteInhibit
10800 && rcStrict == VINF_SUCCESS
10801 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10802 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10803 {
10804 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10805 if (rcStrict == VINF_SUCCESS)
10806 {
10807# ifdef LOG_ENABLED
10808 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10809# endif
10810 IEM_OPCODE_GET_NEXT_U8(&b);
10811 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10812 if (rcStrict == VINF_SUCCESS)
10813 pIemCpu->cInstructions++;
10814 if (pIemCpu->cActiveMappings > 0)
10815 iemMemRollback(pIemCpu);
10816 }
10817 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10818 }
10819
10820 /*
10821 * Return value fiddling, statistics and sanity assertions.
10822 */
10823 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10824
10825 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10826 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10827#if defined(IEM_VERIFICATION_MODE_FULL)
10828 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10829 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10830 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10831 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10832#endif
10833 return rcStrict;
10834}
10835
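/*
 * Illustration (hypothetical guest code, not part of the build): a sequence for
 * which the fExecuteInhibit handling above matters.  Loading SS inhibits
 * interrupts for exactly one instruction, so the stack pointer update has to be
 * interpreted in the same iemExecOneInner call rather than after returning to
 * the caller, where an interrupt could otherwise be injected in between:
 *
 *      mov     ss, ax      ; sets the interrupt inhibit force flag checked above
 *      mov     esp, ebx    ; executed by the same iemExecOneInner invocation
 */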
10836
10837#ifdef IN_RC
10838/**
10839 * Re-enters raw-mode or ensures we return to ring-3.
10840 *
10841 * @returns rcStrict, maybe modified.
10842 * @param pIemCpu The IEM CPU structure.
10843 * @param pVCpu The cross context virtual CPU structure of the caller.
10844 * @param pCtx The current CPU context.
10845 * @param rcStrict The status code returned by the interpreter.
10846 */
10847DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10848{
10849 if (!pIemCpu->fInPatchCode)
10850 CPUMRawEnter(pVCpu);
10851 return rcStrict;
10852}
10853#endif
10854
10855
10856/**
10857 * Execute one instruction.
10858 *
10859 * @return Strict VBox status code.
10860 * @param pVCpu The current virtual CPU.
10861 */
10862VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10863{
10864 PIEMCPU pIemCpu = &pVCpu->iem.s;
10865
10866#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10867 iemExecVerificationModeSetup(pIemCpu);
10868#endif
10869#ifdef LOG_ENABLED
10870 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10871 iemLogCurInstr(pVCpu, pCtx, true);
10872#endif
10873
10874 /*
10875 * Do the decoding and emulation.
10876 */
10877 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10878 if (rcStrict == VINF_SUCCESS)
10879 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10880
10881#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10882 /*
10883 * Assert some sanity.
10884 */
10885 iemExecVerificationModeCheck(pIemCpu);
10886#endif
10887#ifdef IN_RC
10888 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10889#endif
10890 if (rcStrict != VINF_SUCCESS)
10891 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10892 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10893 return rcStrict;
10894}
10895
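/*
 * Hedged sketch (hypothetical helper, never compiled): how a caller might drive
 * IEMExecOne in a loop, bailing out on the first status code that is not plain
 * VINF_SUCCESS.  Only IEMExecOne and the standard VBox types are real here; the
 * helper name and the instruction budget parameter are assumptions.
 */
#if 0
static VBOXSTRICTRC hypotheticalInterpretLoop(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxInstructions-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);
        if (rcStrict != VINF_SUCCESS) /* informational and error statuses alike go to the caller */
            break;
    }
    return rcStrict;
}
#endif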
10896
10897VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10898{
10899 PIEMCPU pIemCpu = &pVCpu->iem.s;
10900 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10901 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10902
10903 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10904 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10905 if (rcStrict == VINF_SUCCESS)
10906 {
10907 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10908 if (pcbWritten)
10909 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10910 }
10911
10912#ifdef IN_RC
10913 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10914#endif
10915 return rcStrict;
10916}
10917
10918
10919VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10920 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10921{
10922 PIEMCPU pIemCpu = &pVCpu->iem.s;
10923 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10924 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10925
10926 VBOXSTRICTRC rcStrict;
10927 if ( cbOpcodeBytes
10928 && pCtx->rip == OpcodeBytesPC)
10929 {
10930 iemInitDecoder(pIemCpu, false);
10931 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10932 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10933 rcStrict = VINF_SUCCESS;
10934 }
10935 else
10936 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10937 if (rcStrict == VINF_SUCCESS)
10938 {
10939 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10940 }
10941
10942#ifdef IN_RC
10943 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10944#endif
10945 return rcStrict;
10946}
10947
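/*
 * Hedged sketch (hypothetical helper, never compiled): handing opcode bytes that
 * were already fetched elsewhere (e.g. by an exit handler) to the interpreter.
 * When OpcodeBytesPC matches the current RIP the prefetch above is skipped and
 * the supplied bytes are decoded directly.  The helper name and its parameters
 * are assumptions; CPUMQueryGuestCtxPtr and CPUMCTX2CORE are the usual CPUM APIs.
 */
#if 0
static VBOXSTRICTRC hypotheticalExecWithFetchedBytes(PVMCPU pVCpu, const uint8_t *pabInstr, size_t cbInstr)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip, pabInstr, cbInstr);
}
#endif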
10948
10949VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10950{
10951 PIEMCPU pIemCpu = &pVCpu->iem.s;
10952 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10953 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10954
10955 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10956 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10957 if (rcStrict == VINF_SUCCESS)
10958 {
10959 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10960 if (pcbWritten)
10961 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10962 }
10963
10964#ifdef IN_RC
10965 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10966#endif
10967 return rcStrict;
10968}
10969
10970
10971VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10972 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10973{
10974 PIEMCPU pIemCpu = &pVCpu->iem.s;
10975 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10976 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10977
10978 VBOXSTRICTRC rcStrict;
10979 if ( cbOpcodeBytes
10980 && pCtx->rip == OpcodeBytesPC)
10981 {
10982 iemInitDecoder(pIemCpu, true);
10983 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10984 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10985 rcStrict = VINF_SUCCESS;
10986 }
10987 else
10988 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10989 if (rcStrict == VINF_SUCCESS)
10990 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10991
10992#ifdef IN_RC
10993 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10994#endif
10995 return rcStrict;
10996}
10997
10998
10999VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11000{
11001 PIEMCPU pIemCpu = &pVCpu->iem.s;
11002
11003 /*
11004 * See if there is an interrupt pending in TRPM and inject it if we can.
11005 */
11006#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11007 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11008# ifdef IEM_VERIFICATION_MODE_FULL
11009 pIemCpu->uInjectCpl = UINT8_MAX;
11010# endif
11011 if ( pCtx->eflags.Bits.u1IF
11012 && TRPMHasTrap(pVCpu)
11013 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11014 {
11015 uint8_t u8TrapNo;
11016 TRPMEVENT enmType;
11017 RTGCUINT uErrCode;
11018 RTGCPTR uCr2;
11019 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11020 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11021 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11022 TRPMResetTrap(pVCpu);
11023 }
11024#else
11025 iemExecVerificationModeSetup(pIemCpu);
11026 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11027#endif
11028
11029 /*
11030 * Log the state.
11031 */
11032#ifdef LOG_ENABLED
11033 iemLogCurInstr(pVCpu, pCtx, true);
11034#endif
11035
11036 /*
11037 * Do the decoding and emulation.
11038 */
11039 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11040 if (rcStrict == VINF_SUCCESS)
11041 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11042
11043#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11044 /*
11045 * Assert some sanity.
11046 */
11047 iemExecVerificationModeCheck(pIemCpu);
11048#endif
11049
11050 /*
11051 * Maybe re-enter raw-mode and log.
11052 */
11053#ifdef IN_RC
11054 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11055#endif
11056 if (rcStrict != VINF_SUCCESS)
11057 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11058 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11059 return rcStrict;
11060}
11061
11062
11063
11064/**
11065 * Injects a trap, fault, abort, software interrupt or external interrupt.
11066 *
11067 * The parameter list matches TRPMQueryTrapAll pretty closely.
11068 *
11069 * @returns Strict VBox status code.
11070 * @param pVCpu The current virtual CPU.
11071 * @param u8TrapNo The trap number.
11072 * @param enmType The event type: trap/fault/abort, software
11073 * interrupt or hardware (external) interrupt.
11074 * @param uErrCode The error code if applicable.
11075 * @param uCr2 The CR2 value if applicable.
11076 * @param cbInstr The instruction length (only relevant for
11077 * software interrupts).
11078 */
11079VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11080 uint8_t cbInstr)
11081{
11082 iemInitDecoder(&pVCpu->iem.s, false);
11083#ifdef DBGFTRACE_ENABLED
11084 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11085 u8TrapNo, enmType, uErrCode, uCr2);
11086#endif
11087
11088 uint32_t fFlags;
11089 switch (enmType)
11090 {
11091 case TRPM_HARDWARE_INT:
11092 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11093 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11094 uErrCode = uCr2 = 0;
11095 break;
11096
11097 case TRPM_SOFTWARE_INT:
11098 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11099 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11100 uErrCode = uCr2 = 0;
11101 break;
11102
11103 case TRPM_TRAP:
11104 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11105 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11106 if (u8TrapNo == X86_XCPT_PF)
11107 fFlags |= IEM_XCPT_FLAGS_CR2;
11108 switch (u8TrapNo)
11109 {
11110 case X86_XCPT_DF:
11111 case X86_XCPT_TS:
11112 case X86_XCPT_NP:
11113 case X86_XCPT_SS:
11114 case X86_XCPT_PF:
11115 case X86_XCPT_AC:
11116 fFlags |= IEM_XCPT_FLAGS_ERR;
11117 break;
11118
11119 case X86_XCPT_NMI:
11120 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11121 break;
11122 }
11123 break;
11124
11125 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11126 }
11127
11128 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11129}
11130
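/*
 * Hedged sketch (hypothetical helper, never compiled): injecting a guest write
 * page fault through the interface above.  The exception number, trap type and
 * error-code bit are the standard x86/VBox definitions; the helper name and the
 * faulting address parameter are assumptions of this example.
 */
#if 0
static VBOXSTRICTRC hypotheticalInjectGuestPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault)
{
    /* Error code X86_TRAP_PF_RW: write access to a not-present page; cbInstr is
       irrelevant for hardware exceptions. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, X86_TRAP_PF_RW, GCPtrFault, 0 /*cbInstr*/);
}
#endif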
11131
11132/**
11133 * Injects the active TRPM event.
11134 *
11135 * @returns Strict VBox status code.
11136 * @param pVCpu Pointer to the VMCPU.
11137 */
11138VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11139{
11140#ifndef IEM_IMPLEMENTS_TASKSWITCH
11141 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11142#else
11143 uint8_t u8TrapNo;
11144 TRPMEVENT enmType;
11145 RTGCUINT uErrCode;
11146 RTGCUINTPTR uCr2;
11147 uint8_t cbInstr;
11148 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11149 if (RT_FAILURE(rc))
11150 return rc;
11151
11152 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11153
11154 /** @todo Are there any other codes that imply the event was successfully
11155 * delivered to the guest? See @bugref{6607}. */
11156 if ( rcStrict == VINF_SUCCESS
11157 || rcStrict == VINF_IEM_RAISED_XCPT)
11158 {
11159 TRPMResetTrap(pVCpu);
11160 }
11161 return rcStrict;
11162#endif
11163}
11164
11165
11166VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11167{
11168 return VERR_NOT_IMPLEMENTED;
11169}
11170
11171
11172VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11173{
11174 return VERR_NOT_IMPLEMENTED;
11175}
11176
11177
11178#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11179/**
11180 * Executes an IRET instruction with default operand size.
11181 *
11182 * This is for PATM.
11183 *
11184 * @returns VBox status code.
11185 * @param pVCpu The current virtual CPU.
11186 * @param pCtxCore The register frame.
11187 */
11188VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11189{
11190 PIEMCPU pIemCpu = &pVCpu->iem.s;
11191 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11192
11193 iemCtxCoreToCtx(pCtx, pCtxCore);
11194 iemInitDecoder(pIemCpu);
11195 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11196 if (rcStrict == VINF_SUCCESS)
11197 iemCtxToCtxCore(pCtxCore, pCtx);
11198 else
11199 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11200 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11201 return rcStrict;
11202}
11203#endif
11204
11205
11206/**
11207 * Macro used by the IEMExec* methods to check the given instruction length.
11208 *
11209 * Will return on failure!
11210 *
11211 * @param a_cbInstr The given instruction length.
11212 * @param a_cbMin The minimum length.
11213 */
11214#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11215 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11216 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
11217
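/*
 * Worked example of the unsigned range check above, assuming a_cbMin = 2 (so the
 * right hand side is 13): the subtraction wraps around for too-short lengths,
 * letting a single compare reject both ends of the range.
 *      a_cbInstr = 0   ->  0 - 2 = 0xFFFFFFFE  >  13  -> assert and return
 *      a_cbInstr = 2   ->  2 - 2 = 0           <= 13  -> accepted
 *      a_cbInstr = 15  -> 15 - 2 = 13          <= 13  -> accepted (max x86 length)
 *      a_cbInstr = 16  -> 16 - 2 = 14          >  13  -> assert and return
 */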
11218
11219/**
11220 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11221 *
11222 * This API ASSUMES that the caller has already verified that the guest code is
11223 * allowed to access the I/O port. (The I/O port is in the DX register in the
11224 * guest state.)
11225 *
11226 * @returns Strict VBox status code.
11227 * @param pVCpu The cross context per virtual CPU structure.
11228 * @param cbValue The size of the I/O port access (1, 2, or 4).
11229 * @param enmAddrMode The addressing mode.
11230 * @param fRepPrefix Indicates whether a repeat prefix is used
11231 * (doesn't matter which for this instruction).
11232 * @param cbInstr The instruction length in bytes.
11233 * @param iEffSeg The effective segment register number.
11234 */
11235VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11236 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11237{
11238 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11239 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11240
11241 /*
11242 * State init.
11243 */
11244 PIEMCPU pIemCpu = &pVCpu->iem.s;
11245 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11246
11247 /*
11248 * Switch orgy for getting to the right handler.
11249 */
11250 VBOXSTRICTRC rcStrict;
11251 if (fRepPrefix)
11252 {
11253 switch (enmAddrMode)
11254 {
11255 case IEMMODE_16BIT:
11256 switch (cbValue)
11257 {
11258 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11259 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11260 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11261 default:
11262 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11263 }
11264 break;
11265
11266 case IEMMODE_32BIT:
11267 switch (cbValue)
11268 {
11269 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11270 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11271 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11272 default:
11273 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11274 }
11275 break;
11276
11277 case IEMMODE_64BIT:
11278 switch (cbValue)
11279 {
11280 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11281 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11282 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11283 default:
11284 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11285 }
11286 break;
11287
11288 default:
11289 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11290 }
11291 }
11292 else
11293 {
11294 switch (enmAddrMode)
11295 {
11296 case IEMMODE_16BIT:
11297 switch (cbValue)
11298 {
11299 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11300 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11301 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11302 default:
11303 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11304 }
11305 break;
11306
11307 case IEMMODE_32BIT:
11308 switch (cbValue)
11309 {
11310 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11311 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11312 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11313 default:
11314 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11315 }
11316 break;
11317
11318 case IEMMODE_64BIT:
11319 switch (cbValue)
11320 {
11321 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11322 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11323 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11324 default:
11325 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11326 }
11327 break;
11328
11329 default:
11330 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11331 }
11332 }
11333
11334 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11335}
11336
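/*
 * Hedged sketch (hypothetical helper, never compiled): emulating an intercepted
 * "rep outsb" with 16-bit addressing and the default DS source segment.  The
 * 2-byte length matches the f3 6e encoding; the helper name is an assumption.
 */
#if 0
static VBOXSTRICTRC hypotheticalEmulateRepOutsb(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS /*iEffSeg*/);
}
#endif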
11337
11338/**
11339 * Interface for HM and EM for executing string I/O IN (read) instructions.
11340 *
11341 * This API ASSUMES that the caller has already verified that the guest code is
11342 * allowed to access the I/O port. (The I/O port is in the DX register in the
11343 * guest state.)
11344 *
11345 * @returns Strict VBox status code.
11346 * @param pVCpu The cross context per virtual CPU structure.
11347 * @param cbValue The size of the I/O port access (1, 2, or 4).
11348 * @param enmAddrMode The addressing mode.
11349 * @param fRepPrefix Indicates whether a repeat prefix is used
11350 * (doesn't matter which for this instruction).
11351 * @param cbInstr The instruction length in bytes.
11352 */
11353VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11354 bool fRepPrefix, uint8_t cbInstr)
11355{
11356 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11357
11358 /*
11359 * State init.
11360 */
11361 PIEMCPU pIemCpu = &pVCpu->iem.s;
11362 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11363
11364 /*
11365 * Switch orgy for getting to the right handler.
11366 */
11367 VBOXSTRICTRC rcStrict;
11368 if (fRepPrefix)
11369 {
11370 switch (enmAddrMode)
11371 {
11372 case IEMMODE_16BIT:
11373 switch (cbValue)
11374 {
11375 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11376 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11377 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11378 default:
11379 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11380 }
11381 break;
11382
11383 case IEMMODE_32BIT:
11384 switch (cbValue)
11385 {
11386 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11387 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11388 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11389 default:
11390 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11391 }
11392 break;
11393
11394 case IEMMODE_64BIT:
11395 switch (cbValue)
11396 {
11397 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11398 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11399 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11400 default:
11401 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11402 }
11403 break;
11404
11405 default:
11406 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11407 }
11408 }
11409 else
11410 {
11411 switch (enmAddrMode)
11412 {
11413 case IEMMODE_16BIT:
11414 switch (cbValue)
11415 {
11416 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11417 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11418 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11419 default:
11420 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11421 }
11422 break;
11423
11424 case IEMMODE_32BIT:
11425 switch (cbValue)
11426 {
11427 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11428 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11429 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11430 default:
11431 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11432 }
11433 break;
11434
11435 case IEMMODE_64BIT:
11436 switch (cbValue)
11437 {
11438 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11439 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11440 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11441 default:
11442 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11443 }
11444 break;
11445
11446 default:
11447 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11448 }
11449 }
11450
11451 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11452}
11453
11454
11455
11456/**
11457 * Interface for HM and EM to write to a CRx register.
11458 *
11459 * @returns Strict VBox status code.
11460 * @param pVCpu The cross context per virtual CPU structure.
11461 * @param cbInstr The instruction length in bytes.
11462 * @param iCrReg The control register number (destination).
11463 * @param iGReg The general purpose register number (source).
11464 *
11465 * @remarks In ring-0 not all of the state needs to be synced in.
11466 */
11467VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11468{
11469 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11470 Assert(iCrReg < 16);
11471 Assert(iGReg < 16);
11472
11473 PIEMCPU pIemCpu = &pVCpu->iem.s;
11474 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11475 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11476 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11477}
11478
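/*
 * Hedged sketch (hypothetical helper, never compiled): emulating an intercepted
 * "mov cr3, rax" through the interface above.  The 3-byte length matches the
 * 0f 22 d8 encoding; CR3 is control register 3 and RAX is X86_GREG_xAX.  The
 * helper name is an assumption of this example.
 */
#if 0
static VBOXSTRICTRC hypotheticalEmulateMovCr3FromRax(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, X86_GREG_xAX /*iGReg*/);
}
#endif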
11479
11480/**
11481 * Interface for HM and EM to read from a CRx register.
11482 *
11483 * @returns Strict VBox status code.
11484 * @param pVCpu The cross context per virtual CPU structure.
11485 * @param cbInstr The instruction length in bytes.
11486 * @param iGReg The general purpose register number (destination).
11487 * @param iCrReg The control register number (source).
11488 *
11489 * @remarks In ring-0 not all of the state needs to be synced in.
11490 */
11491VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11492{
11493 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11494 Assert(iCrReg < 16);
11495 Assert(iGReg < 16);
11496
11497 PIEMCPU pIemCpu = &pVCpu->iem.s;
11498 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11499 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11500 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11501}
11502
11503
11504/**
11505 * Interface for HM and EM to clear the CR0[TS] bit.
11506 *
11507 * @returns Strict VBox status code.
11508 * @param pVCpu The cross context per virtual CPU structure.
11509 * @param cbInstr The instruction length in bytes.
11510 *
11511 * @remarks In ring-0 not all of the state needs to be synced in.
11512 */
11513VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11514{
11515 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11516
11517 PIEMCPU pIemCpu = &pVCpu->iem.s;
11518 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11519 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11520 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11521}
11522
11523
11524/**
11525 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11526 *
11527 * @returns Strict VBox status code.
11528 * @param pVCpu The cross context per virtual CPU structure.
11529 * @param cbInstr The instruction length in bytes.
11530 * @param uValue The value to load into CR0.
11531 *
11532 * @remarks In ring-0 not all of the state needs to be synced in.
11533 */
11534VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11535{
11536 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11537
11538 PIEMCPU pIemCpu = &pVCpu->iem.s;
11539 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11540 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11541 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11542}
11543
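/*
 * Hedged sketch (hypothetical helper, never compiled): emulating an intercepted
 * register-form "lmsw ax", which encodes as 0f 01 f0 and is thus 3 bytes long.
 * The helper name and the captured operand value are assumptions of this example.
 */
#if 0
static VBOXSTRICTRC hypotheticalEmulateLmswAx(PVMCPU pVCpu, uint16_t uNewMsw)
{
    return IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/, uNewMsw);
}
#endif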
11544
11545/**
11546 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11547 *
11548 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11549 *
11550 * @returns Strict VBox status code.
11551 * @param pVCpu The cross context per virtual CPU structure of the
11552 * calling EMT.
11553 * @param cbInstr The instruction length in bytes.
11554 * @remarks In ring-0 not all of the state needs to be synced in.
11555 * @threads EMT(pVCpu)
11556 */
11557VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11558{
11559 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11560
11561 PIEMCPU pIemCpu = &pVCpu->iem.s;
11562 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11563 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11564 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11565}
11566
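/*
 * Hedged sketch (hypothetical helper, never compiled): since XSETBV takes the
 * XCR index from guest ecx and the new value from guest edx:eax, the caller only
 * has to make sure the CPU context already holds those values and then pass the
 * 3-byte 0f 01 d1 encoding length.  The helper name is an assumption.
 */
#if 0
static VBOXSTRICTRC hypotheticalEmulateXsetbv(PVMCPU pVCpu)
{
    return IEMExecDecodedXsetbv(pVCpu, 3 /*cbInstr*/);
}
#endif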
11567#ifdef IN_RING3
11568
11569/**
11570 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11571 *
11572 * @returns Merge between @a rcStrict and what the commit operation returned.
11573 * @param pVCpu Pointer to the cross context CPU structure for the
11574 * calling EMT.
11575 * @param rcStrict The status code returned by ring-0 or raw-mode.
11576 */
11577VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11578{
11579 PIEMCPU pIemCpu = &pVCpu->iem.s;
11580
11581 /*
11582 * Retrieve and reset the pending commit.
11583 */
11584 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11585 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11586 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11587
11588 /*
11589 * Must reset pass-up status code.
11590 */
11591 pIemCpu->rcPassUp = VINF_SUCCESS;
11592
11593 /*
11594 * Call the function. Currently using a switch here instead of a function
11595 * pointer table, as a switch won't get skewed if the enum changes.
11596 */
11597 VBOXSTRICTRC rcStrictCommit;
11598 switch (enmFn)
11599 {
11600 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11601 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11602 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11603 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11604 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11605 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11606 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11607 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11608 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11609 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11610 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11611 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11612 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11613 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11614 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11615 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11616 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11617 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11618 default:
11619 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11620 }
11621
11622 /*
11623 * Merge status code (if any) with the incoming one.
11624 */
11625 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11626 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11627 return rcStrict;
11628 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11629 return rcStrictCommit;
11630
11631 /* Complicated. */
11632 if (RT_FAILURE(rcStrict))
11633 return rcStrict;
11634 if (RT_FAILURE(rcStrictCommit))
11635 return rcStrictCommit;
11636 if ( rcStrict >= VINF_EM_FIRST
11637 && rcStrict <= VINF_EM_LAST)
11638 {
11639 if ( rcStrictCommit >= VINF_EM_FIRST
11640 && rcStrictCommit <= VINF_EM_LAST)
11641 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11642
11643 /* This really shouldn't happen. Check PGM + handler code! */
11644 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11645 }
11646 /* This shouldn't really happen either, see IOM_SUCCESS. */
11647 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11648}
11649
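/*
 * Worked examples of the merging rules above (the scenarios are hypothetical,
 * the rules are the ones implemented in IEMR3DoPendingAction):
 *      - The commit comes back as VINF_SUCCESS: the incoming rcStrict is
 *        returned unchanged.
 *      - rcStrict is VINF_SUCCESS or VINF_EM_RAW_TO_R3: the (fiddled) commit
 *        status is returned instead, since the trip to ring-3 existed only to
 *        perform the commit.
 *      - Both are VINF_EM statuses: the smaller, i.e. more urgent, of the two
 *        is returned.
 */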
11650 #endif /* IN_RING3 */
11651